This commit is contained in:
J. Duke 2017-07-05 17:47:52 +02:00
commit 9cda91e98f
241 changed files with 9518 additions and 3656 deletions

View file

@ -121,3 +121,4 @@ cfbbdb77eac0397b03eb99ee2e07ea00e0a7b81e jdk7-b142
7203965666a4fe63bf82f5e4204f41ce6285e716 jdk7-b144 7203965666a4fe63bf82f5e4204f41ce6285e716 jdk7-b144
55e9ebf032186c333e5964ed044419830ac02693 jdk7-b145 55e9ebf032186c333e5964ed044419830ac02693 jdk7-b145
2d38c2a79c144c30cd04d143d83ee7ec6af40771 jdk7-b146 2d38c2a79c144c30cd04d143d83ee7ec6af40771 jdk7-b146
d91364304d7c4ecd34caffdba2b840aeb0d10b51 jdk7-b147

View file

@ -1 +1 @@
project=jdk7 project=jdk8

View file

@ -172,3 +172,5 @@ d283b82966712b353fa307845a1316da42a355f4 hs21-b10
3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11 3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11
9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142 9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142
9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12 9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12
c149193c768b8b7233da4c3a3fdc0756b975848e hs21-b13
c149193c768b8b7233da4c3a3fdc0756b975848e jdk7-b143

View file

@ -1 +1 @@
project=jdk7 project=jdk8

View file

@ -1028,7 +1028,12 @@ public class CommandProcessor {
if (AddressOps.equal(val, value)) { if (AddressOps.equal(val, value)) {
if (!printed) { if (!printed) {
printed = true; printed = true;
try {
blob.printOn(out); blob.printOn(out);
} catch (Exception e) {
out.println("Exception printing blob at " + base);
e.printStackTrace();
}
} }
out.println("found at " + base + "\n"); out.println("found at " + base + "\n");
} }

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.code;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
/** SA-side mirror of the VM's AdapterBlob code blob.  Fields are not yet
    mapped (see initialize below); the class currently only provides type
    identification via isAdapterBlob() and a decorated name. */
public class AdapterBlob extends CodeBlob {

  public AdapterBlob(Address blobAddr) {
    super(blobAddr);
  }

  /** Marks this blob as an adapter blob for CodeBlob type dispatch. */
  public boolean isAdapterBlob() {
    return true;
  }

  /** Prefixes the underlying blob name with the concrete blob kind. */
  public String getName() {
    return "AdapterBlob: " + super.getName();
  }

  // Deferred field setup, run once the VM's type database is available.
  private static void initialize(TypeDataBase db) {
    // Type type = db.lookupType("AdapterBlob");
    // // FIXME: add any needed fields
  }

  static {
    // Re-run initialization on every VM attach/re-attach.
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable unused, Object data) {
          initialize(VM.getVM().getTypeDataBase());
        }
      });
  }
}

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -93,6 +93,8 @@ public class CodeBlob extends VMObject {
public boolean isUncommonTrapStub() { return false; } public boolean isUncommonTrapStub() { return false; }
public boolean isExceptionStub() { return false; } public boolean isExceptionStub() { return false; }
public boolean isSafepointStub() { return false; } public boolean isSafepointStub() { return false; }
public boolean isRicochetBlob() { return false; }
public boolean isAdapterBlob() { return false; }
// Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod() // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
public boolean isJavaMethod() { return false; } public boolean isJavaMethod() { return false; }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -57,6 +57,8 @@ public class CodeCache {
virtualConstructor.addMapping("BufferBlob", BufferBlob.class); virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
virtualConstructor.addMapping("nmethod", NMethod.class); virtualConstructor.addMapping("nmethod", NMethod.class);
virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class); virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class); virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class); virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
if (VM.getVM().isServerCompiler()) { if (VM.getVM().isServerCompiler()) {

View file

@ -0,0 +1,56 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.code;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
/** RicochetBlob (currently only used by Compiler 2) */
/** SA-side mirror of the VM's RicochetBlob (currently only used by
    Compiler 2).  No fields are mapped yet; the class only answers the
    isRicochetBlob() type query. */
public class RicochetBlob extends SingletonBlob {

  public RicochetBlob(Address blobAddr) {
    super(blobAddr);
  }

  /** Marks this blob as a ricochet blob for CodeBlob type dispatch. */
  public boolean isRicochetBlob() {
    return true;
  }

  // Deferred field setup, run once the VM's type database is available.
  private static void initialize(TypeDataBase db) {
    // Type type = db.lookupType("RicochetBlob");
    // FIXME: add any needed fields
  }

  static {
    // Re-run initialization on every VM attach/re-attach.
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable unused, Object data) {
          initialize(VM.getVM().getTypeDataBase());
        }
      });
  }
}

View file

@ -33,13 +33,13 @@
# Don't put quotes (fail windows build). # Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2011 HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=21 HS_MAJOR_VER=22
HS_MINOR_VER=0 HS_MINOR_VER=0
HS_BUILD_NUMBER=13 HS_BUILD_NUMBER=01
JDK_MAJOR_VER=1 JDK_MAJOR_VER=1
JDK_MINOR_VER=7 JDK_MINOR_VER=8
JDK_MICRO_VER=0 JDK_MICRO_VER=0
# Previous (bootdir) JDK version # Previous (bootdir) JDK version
JDK_PREVIOUS_VERSION=1.6.0 JDK_PREVIOUS_VERSION=1.7.0

View file

@ -1,5 +1,5 @@
# #
# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,24 @@ else
ZIPFLAGS=-q -y ZIPFLAGS=-q -y
endif endif
jprt_build_productEmb:
$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
jprt_build_debugEmb:
$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
jprt_build_fastdebugEmb:
$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
jprt_build_productOpen:
$(MAKE) OPENJDK=true jprt_build_product
jprt_build_debugOpen:
$(MAKE) OPENJDK=true jprt_build_debug
jprt_build_fastdebugOpen:
$(MAKE) OPENJDK=true jprt_build_fastdebug
jprt_build_product: all_product copy_product_jdk export_product_jdk jprt_build_product: all_product copy_product_jdk export_product_jdk
( $(CD) $(JDK_IMAGE_DIR) && \ ( $(CD) $(JDK_IMAGE_DIR) && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . ) $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )

View file

@ -50,7 +50,7 @@ jprt.sync.push=false
# sparc etc. # sparc etc.
# Define the Solaris platforms we want for the various releases # Define the Solaris platforms we want for the various releases
jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10 jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10 jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
@ -64,6 +64,7 @@ jprt.my.solaris.sparc.ejdk7=${jprt.my.solaris.sparc.jdk7}
jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6} jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}} jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10 jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
@ -77,6 +78,7 @@ jprt.my.solaris.sparcv9.ejdk7=${jprt.my.solaris.sparcv9.jdk7}
jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6} jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}} jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
jprt.my.solaris.i586.jdk8=solaris_i586_5.10
jprt.my.solaris.i586.jdk7=solaris_i586_5.10 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10 jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10 jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
@ -90,6 +92,7 @@ jprt.my.solaris.i586.ejdk7=${jprt.my.solaris.i586.jdk7}
jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6} jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}} jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
jprt.my.solaris.x64.jdk7=solaris_x64_5.10 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10 jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10 jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
@ -103,6 +106,7 @@ jprt.my.solaris.x64.ejdk7=${jprt.my.solaris.x64.jdk7}
jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6} jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}} jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
jprt.my.linux.i586.jdk8=linux_i586_2.6
jprt.my.linux.i586.jdk7=linux_i586_2.6 jprt.my.linux.i586.jdk7=linux_i586_2.6
jprt.my.linux.i586.jdk7b107=linux_i586_2.6 jprt.my.linux.i586.jdk7b107=linux_i586_2.6
jprt.my.linux.i586.jdk7temp=linux_i586_2.6 jprt.my.linux.i586.jdk7temp=linux_i586_2.6
@ -116,6 +120,7 @@ jprt.my.linux.i586.ejdk7=linux_i586_2.6
jprt.my.linux.i586.ejdk6=linux_i586_2.6 jprt.my.linux.i586.ejdk6=linux_i586_2.6
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}} jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
jprt.my.linux.x64.jdk8=linux_x64_2.6
jprt.my.linux.x64.jdk7=linux_x64_2.6 jprt.my.linux.x64.jdk7=linux_x64_2.6
jprt.my.linux.x64.jdk7b107=linux_x64_2.6 jprt.my.linux.x64.jdk7b107=linux_x64_2.6
jprt.my.linux.x64.jdk7temp=linux_x64_2.6 jprt.my.linux.x64.jdk7temp=linux_x64_2.6
@ -129,6 +134,7 @@ jprt.my.linux.x64.ejdk7=${jprt.my.linux.x64.jdk7}
jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6} jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}} jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
jprt.my.linux.ppc.jdk8=linux_ppc_2.6
jprt.my.linux.ppc.jdk7=linux_ppc_2.6 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6 jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6 jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
@ -136,6 +142,7 @@ jprt.my.linux.ppc.ejdk6=linux_ppc_2.6
jprt.my.linux.ppc.ejdk7=linux_ppc_2.6 jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}} jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6 jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6 jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
@ -143,6 +150,7 @@ jprt.my.linux.ppcv2.ejdk6=linux_ppcv2_2.6
jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6 jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}} jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6 jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6 jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
@ -150,6 +158,7 @@ jprt.my.linux.ppcsflt.ejdk6=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6 jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}} jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6 jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6 jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
@ -157,6 +166,7 @@ jprt.my.linux.armvfp.ejdk6=linux_armvfp_2.6
jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6 jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}} jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6 jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6 jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
@ -164,6 +174,7 @@ jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6 jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}} jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
jprt.my.windows.i586.jdk8=windows_i586_5.1
jprt.my.windows.i586.jdk7=windows_i586_5.1 jprt.my.windows.i586.jdk7=windows_i586_5.1
jprt.my.windows.i586.jdk7b107=windows_i586_5.0 jprt.my.windows.i586.jdk7b107=windows_i586_5.0
jprt.my.windows.i586.jdk7temp=windows_i586_5.0 jprt.my.windows.i586.jdk7temp=windows_i586_5.0
@ -177,6 +188,7 @@ jprt.my.windows.i586.ejdk7=${jprt.my.windows.i586.jdk7}
jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6} jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}} jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
jprt.my.windows.x64.jdk8=windows_x64_5.2
jprt.my.windows.x64.jdk7=windows_x64_5.2 jprt.my.windows.x64.jdk7=windows_x64_5.2
jprt.my.windows.x64.jdk7b107=windows_x64_5.2 jprt.my.windows.x64.jdk7b107=windows_x64_5.2
jprt.my.windows.x64.jdk7temp=windows_x64_5.2 jprt.my.windows.x64.jdk7temp=windows_x64_5.2
@ -202,17 +214,23 @@ jprt.build.targets.standard= \
${jprt.my.windows.i586}-{product|fastdebug|debug}, \ ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
${jprt.my.windows.x64}-{product|fastdebug|debug} ${jprt.my.windows.x64}-{product|fastdebug|debug}
jprt.build.targets.open= \
${jprt.my.solaris.i586}-{productOpen}, \
${jprt.my.solaris.x64}-{debugOpen}, \
${jprt.my.linux.x64}-{productOpen}
jprt.build.targets.embedded= \ jprt.build.targets.embedded= \
${jprt.my.linux.i586}-{product|fastdebug|debug}, \ ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
${jprt.my.linux.ppc}-{product|fastdebug}, \ ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.ppcv2}-{product|fastdebug}, \ ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.ppcsflt}-{product|fastdebug}, \ ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.armvfp}-{product|fastdebug}, \ ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
${jprt.my.linux.armsflt}-{product|fastdebug} ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
jprt.build.targets.all=${jprt.build.targets.standard}, \ jprt.build.targets.all=${jprt.build.targets.standard}, \
${jprt.build.targets.embedded} ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
jprt.build.targets.jdk8=${jprt.build.targets.all}
jprt.build.targets.jdk7=${jprt.build.targets.all} jprt.build.targets.jdk7=${jprt.build.targets.all}
jprt.build.targets.jdk7temp=${jprt.build.targets.all} jprt.build.targets.jdk7temp=${jprt.build.targets.all}
jprt.build.targets.jdk7b107=${jprt.build.targets.all} jprt.build.targets.jdk7b107=${jprt.build.targets.all}
@ -453,6 +471,12 @@ jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-product-c2-jbb_G1, \ ${jprt.my.windows.x64}-product-c2-jbb_G1, \
${jprt.my.windows.x64}-product-c2-jbb_ParOldGC ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
# Some basic "smoke" tests for OpenJDK builds
jprt.test.targets.open = \
${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
# Testing for actual embedded builds is different to standard # Testing for actual embedded builds is different to standard
jprt.my.linux.i586.test.targets.embedded = \ jprt.my.linux.i586.test.targets.embedded = \
linux_i586_2.6-product-c1-scimark linux_i586_2.6-product-c1-scimark
@ -461,6 +485,7 @@ jprt.my.linux.i586.test.targets.embedded = \
# Note: no PPC or ARM tests at this stage # Note: no PPC or ARM tests at this stage
jprt.test.targets.standard = \ jprt.test.targets.standard = \
${jprt.my.linux.i586.test.targets.embedded}, \
${jprt.my.solaris.sparc.test.targets}, \ ${jprt.my.solaris.sparc.test.targets}, \
${jprt.my.solaris.sparcv9.test.targets}, \ ${jprt.my.solaris.sparcv9.test.targets}, \
${jprt.my.solaris.i586.test.targets}, \ ${jprt.my.solaris.i586.test.targets}, \
@ -468,7 +493,8 @@ jprt.test.targets.standard = \
${jprt.my.linux.i586.test.targets}, \ ${jprt.my.linux.i586.test.targets}, \
${jprt.my.linux.x64.test.targets}, \ ${jprt.my.linux.x64.test.targets}, \
${jprt.my.windows.i586.test.targets}, \ ${jprt.my.windows.i586.test.targets}, \
${jprt.my.windows.x64.test.targets} ${jprt.my.windows.x64.test.targets}, \
${jprt.test.targets.open}
jprt.test.targets.embedded= \ jprt.test.targets.embedded= \
${jprt.my.linux.i586.test.targets.embedded}, \ ${jprt.my.linux.i586.test.targets.embedded}, \
@ -481,6 +507,7 @@ jprt.test.targets.embedded= \
${jprt.my.windows.x64.test.targets} ${jprt.my.windows.x64.test.targets}
jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard} jprt.test.targets.jdk7=${jprt.test.targets.standard}
jprt.test.targets.jdk7temp=${jprt.test.targets.standard} jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
jprt.test.targets.jdk7b105=${jprt.test.targets.standard} jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
@ -521,6 +548,7 @@ jprt.make.rule.test.targets.standard = \
jprt.make.rule.test.targets.embedded = \ jprt.make.rule.test.targets.embedded = \
${jprt.make.rule.test.targets.standard.client} ${jprt.make.rule.test.targets.standard.client}
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard} jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard} jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard} jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}

View file

@ -42,6 +42,12 @@
#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegion.hpp"
#endif #endif
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
// Convert the raw encoding form into the form expected by the // Convert the raw encoding form into the form expected by the
// constructor for Address. // constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) { Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
@ -1072,6 +1078,12 @@ void MacroAssembler::call_VM_base(
check_and_forward_exception(Gtemp); check_and_forward_exception(Gtemp);
} }
#ifdef ASSERT
set(badHeapWordVal, G3);
set(badHeapWordVal, G4);
set(badHeapWordVal, G5);
#endif
// get oop result if there is one and reset the value in the thread // get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) { if (oop_result->is_valid()) {
get_vm_result(oop_result); get_vm_result(oop_result);
@ -1177,6 +1189,11 @@ void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_poin
call(entry_point, relocInfo::runtime_call_type); call(entry_point, relocInfo::runtime_call_type);
delayed()->nop(); delayed()->nop();
restore_thread(thread_cache); restore_thread(thread_cache);
#ifdef ASSERT
set(badHeapWordVal, G3);
set(badHeapWordVal, G4);
set(badHeapWordVal, G5);
#endif
} }
@ -1518,7 +1535,7 @@ int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
// save_frame: given number of "extra" words in frame, // save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual) // issue approp. save instruction (p 200, v8 manual)
void MacroAssembler::save_frame(int extraWords = 0) { void MacroAssembler::save_frame(int extraWords) {
int delta = -total_frame_size_in_bytes(extraWords); int delta = -total_frame_size_in_bytes(extraWords);
if (is_simm13(delta)) { if (is_simm13(delta)) {
save(SP, delta, SP); save(SP, delta, SP);
@ -1730,6 +1747,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
if (reg == G0) return; // always NULL, which is always an oop if (reg == G0) return; // always NULL, which is always an oop
BLOCK_COMMENT("verify_oop {");
char buffer[64]; char buffer[64];
#ifdef COMPILER1 #ifdef COMPILER1
if (CommentedAssembly) { if (CommentedAssembly) {
@ -1768,6 +1786,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
delayed()->nop(); delayed()->nop();
// recover frame size // recover frame size
add(SP, 8*8,SP); add(SP, 8*8,SP);
BLOCK_COMMENT("} verify_oop");
} }
void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) { void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) {
@ -2040,7 +2059,7 @@ void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
} }
else else
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
assert(false, "error"); assert(false, err_msg("DEBUG MESSAGE: %s", msg));
} }
@ -3230,6 +3249,7 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
Register temp_reg,
int extra_slot_offset) { int extra_slot_offset) {
// cf. TemplateTable::prepare_invoke(), if (load_receiver). // cf. TemplateTable::prepare_invoke(), if (load_receiver).
int stackElementSize = Interpreter::stackElementSize; int stackElementSize = Interpreter::stackElementSize;
@ -3238,18 +3258,19 @@ RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
offset += arg_slot.as_constant() * stackElementSize; offset += arg_slot.as_constant() * stackElementSize;
return offset; return offset;
} else { } else {
Register temp = arg_slot.as_register(); assert(temp_reg != noreg, "must specify");
sll_ptr(temp, exact_log2(stackElementSize), temp); sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
if (offset != 0) if (offset != 0)
add(temp, offset, temp); add(temp_reg, offset, temp_reg);
return temp; return temp_reg;
} }
} }
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
Register temp_reg,
int extra_slot_offset) { int extra_slot_offset) {
return Address(Gargs, argument_offset(arg_slot, extra_slot_offset)); return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
} }
@ -4906,4 +4927,3 @@ void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
// Caller should set it: // Caller should set it:
// add(G0, 1, result); // equals // add(G0, 1, result); // equals
} }

View file

@ -316,6 +316,8 @@ class Address VALUE_OBJ_CLASS_SPEC {
bool has_index() const { return _index_or_disp.is_register(); } bool has_index() const { return _index_or_disp.is_register(); }
bool has_disp() const { return _index_or_disp.is_constant(); } bool has_disp() const { return _index_or_disp.is_constant(); }
bool uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
const relocInfo::relocType rtype() { return _rspec.type(); } const relocInfo::relocType rtype() { return _rspec.type(); }
const RelocationHolder& rspec() { return _rspec; } const RelocationHolder& rspec() { return _rspec; }
@ -330,6 +332,10 @@ class Address VALUE_OBJ_CLASS_SPEC {
Address a(base(), disp() + plusdisp); Address a(base(), disp() + plusdisp);
return a; return a;
} }
bool is_same_address(Address a) const {
// disregard _rspec
return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
}
Address after_save() const { Address after_save() const {
Address a = (*this); Address a = (*this);
@ -436,6 +442,10 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
: _address((address) addr), : _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {} _rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {}
AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none) AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
: _address((address) addr), : _address((address) addr),
_rspec(rspec_from_rtype(rtype, (address) addr)) {} _rspec(rspec_from_rtype(rtype, (address) addr)) {}
@ -455,6 +465,21 @@ class AddressLiteral VALUE_OBJ_CLASS_SPEC {
} }
}; };
// Convenience classes
class ExternalAddress: public AddressLiteral {
private:
static relocInfo::relocType reloc_for_target(address target) {
// Sometimes ExternalAddress is used for values which aren't
// exactly addresses, like the card table base.
// external_word_type can't be used for values in the first page
// so just skip the reloc in that case.
return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
}
public:
ExternalAddress(address target) : AddressLiteral(target, reloc_for_target( target)) {}
ExternalAddress(oop* target) : AddressLiteral(target, reloc_for_target((address) target)) {}
};
inline Address RegisterImpl::address_in_saved_window() const { inline Address RegisterImpl::address_in_saved_window() const {
return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS)); return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
@ -691,6 +716,8 @@ class Assembler : public AbstractAssembler {
casa_op3 = 0x3c, casa_op3 = 0x3c,
casxa_op3 = 0x3e, casxa_op3 = 0x3e,
mftoi_op3 = 0x36,
alt_bit_op3 = 0x10, alt_bit_op3 = 0x10,
cc_bit_op3 = 0x10 cc_bit_op3 = 0x10
}; };
@ -725,7 +752,13 @@ class Assembler : public AbstractAssembler {
fitod_opf = 0xc8, fitod_opf = 0xc8,
fstod_opf = 0xc9, fstod_opf = 0xc9,
fstoi_opf = 0xd1, fstoi_opf = 0xd1,
fdtoi_opf = 0xd2 fdtoi_opf = 0xd2,
mdtox_opf = 0x110,
mstouw_opf = 0x111,
mstosw_opf = 0x113,
mxtod_opf = 0x118,
mwtos_opf = 0x119
}; };
enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7 }; enum RCondition { rc_z = 1, rc_lez = 2, rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7 };
@ -855,9 +888,8 @@ class Assembler : public AbstractAssembler {
// and be sign-extended. Check the range. // and be sign-extended. Check the range.
static void assert_signed_range(intptr_t x, int nbits) { static void assert_signed_range(intptr_t x, int nbits) {
assert( nbits == 32 assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1)),
|| -(1 << nbits-1) <= x && x < ( 1 << nbits-1), err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits));
"value out of range");
} }
static void assert_signed_word_disp_range(intptr_t x, int nbits) { static void assert_signed_word_disp_range(intptr_t x, int nbits) {
@ -1037,6 +1069,9 @@ class Assembler : public AbstractAssembler {
return x & ((1 << 10) - 1); return x & ((1 << 10) - 1);
} }
// instruction only in VIS3
static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
// instruction only in v9 // instruction only in v9
static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); } static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
@ -1223,8 +1258,8 @@ public:
// pp 159 // pp 159
void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); } void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); } void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
// pp 160 // pp 160
@ -1232,8 +1267,8 @@ public:
// pp 161 // pp 161
void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); } void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only(); emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, FloatRegisterImpl::D)); }
void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); } void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, FloatRegisterImpl::S)); }
// pp 162 // pp 162
@ -1685,6 +1720,19 @@ public:
inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); } inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); } inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
// VIS3 instructions
void movstosw( FloatRegister s, Register d ) { vis3_only(); emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstosw_opf) | fs2(s, FloatRegisterImpl::S)); }
void movstouw( FloatRegister s, Register d ) { vis3_only(); emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mstouw_opf) | fs2(s, FloatRegisterImpl::S)); }
void movdtox( FloatRegister s, Register d ) { vis3_only(); emit_long( op(arith_op) | rd(d) | op3(mftoi_op3) | opf(mdtox_opf) | fs2(s, FloatRegisterImpl::D)); }
void movwtos( Register s, FloatRegister d ) { vis3_only(); emit_long( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
void movxtod( Register s, FloatRegister d ) { vis3_only(); emit_long( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
// For a given register condition, return the appropriate condition code // For a given register condition, return the appropriate condition code
// Condition (the one you would use to get the same effect after "tst" on // Condition (the one you would use to get the same effect after "tst" on
// the target register.) // the target register.)
@ -2287,7 +2335,7 @@ public:
int total_frame_size_in_bytes(int extraWords); int total_frame_size_in_bytes(int extraWords);
// used when extraWords known statically // used when extraWords known statically
void save_frame(int extraWords); void save_frame(int extraWords = 0);
void save_frame_c1(int size_in_bytes); void save_frame_c1(int size_in_bytes);
// make a frame, and simultaneously pass up one or two register value // make a frame, and simultaneously pass up one or two register value
// into the new register window // into the new register window
@ -2456,9 +2504,11 @@ public:
// offset relative to Gargs of argument at tos[arg_slot]. // offset relative to Gargs of argument at tos[arg_slot].
// (arg_slot == 0 means the last argument, not the first). // (arg_slot == 0 means the last argument, not the first).
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
Register temp_reg,
int extra_slot_offset = 0); int extra_slot_offset = 0);
// Address of Gargs and argument_offset. // Address of Gargs and argument_offset.
Address argument_address(RegisterOrConstant arg_slot, Address argument_address(RegisterOrConstant arg_slot,
Register temp_reg,
int extra_slot_offset = 0); int extra_slot_offset = 0);
// Stack overflow checking // Stack overflow checking

View file

@ -255,7 +255,11 @@ inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Regi
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); } inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); } inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) { relocate(a.rspec(offset)); stf(w, d, a.base(), a.disp() + offset); } inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
else { stf(w, d, a.base(), a.disp() + offset); }
}
inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

View file

@ -513,6 +513,8 @@ frame frame::sender(RegisterMap* map) const {
// interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
// explicitly recognized. // explicitly recognized.
if (is_ricochet_frame()) return sender_for_ricochet_frame(map);
bool frame_is_interpreted = is_interpreted_frame(); bool frame_is_interpreted = is_interpreted_frame();
if (frame_is_interpreted) { if (frame_is_interpreted) {
map->make_integer_regs_unsaved(); map->make_integer_regs_unsaved();

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,228 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(22000 DEBUG_ONLY(+ 40000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
};
public:
class RicochetFrame : public ResourceObj {
friend class MethodHandles;
private:
/*
RF field x86 SPARC
sender_pc *(rsp+0) I7-0x8
sender_link rbp I6+BIAS
exact_sender_sp rsi/r13 I5_savedSP
conversion *(rcx+&amh_conv) L5_conv
saved_args_base rax L4_sab (cf. Gargs = G4)
saved_args_layout #NULL L3_sal
saved_target *(rcx+&mh_vmtgt) L2_stgt
continuation #STUB_CON L1_cont
*/
static const Register L1_continuation ; // what to do when control gets back here
static const Register L2_saved_target ; // target method handle to invoke on saved_args
static const Register L3_saved_args_layout; // caching point for MethodTypeForm.vmlayout cookie
static const Register L4_saved_args_base ; // base of pushed arguments (slot 0, arg N) (-3)
static const Register L5_conversion ; // misc. information from original AdapterMethodHandle (-2)
frame _fr;
RicochetFrame(const frame& fr) : _fr(fr) { }
intptr_t* register_addr(Register reg) const {
assert((_fr.sp() + reg->sp_offset_in_saved_window()) == _fr.register_addr(reg), "must agree");
return _fr.register_addr(reg);
}
intptr_t register_value(Register reg) const { return *register_addr(reg); }
public:
intptr_t* continuation() const { return (intptr_t*) register_value(L1_continuation); }
oop saved_target() const { return (oop) register_value(L2_saved_target); }
oop saved_args_layout() const { return (oop) register_value(L3_saved_args_layout); }
intptr_t* saved_args_base() const { return (intptr_t*) register_value(L4_saved_args_base); }
intptr_t conversion() const { return register_value(L5_conversion); }
intptr_t* exact_sender_sp() const { return (intptr_t*) register_value(I5_savedSP); }
intptr_t* sender_link() const { return _fr.sender_sp(); } // XXX
address sender_pc() const { return _fr.sender_pc(); }
// This value is not used for much, but it apparently must be nonzero.
static int frame_size_in_bytes() { return wordSize * 4; }
intptr_t* extended_sender_sp() const { return saved_args_base(); }
intptr_t return_value_slot_number() const {
return adapter_conversion_vminfo(conversion());
}
BasicType return_value_type() const {
return adapter_conversion_dest_type(conversion());
}
bool has_return_value_slot() const {
return return_value_type() != T_VOID;
}
intptr_t* return_value_slot_addr() const {
assert(has_return_value_slot(), "");
return saved_arg_slot_addr(return_value_slot_number());
}
intptr_t* saved_target_slot_addr() const {
return saved_arg_slot_addr(saved_args_length());
}
intptr_t* saved_arg_slot_addr(int slot) const {
assert(slot >= 0, "");
return (intptr_t*)( (address)saved_args_base() + (slot * Interpreter::stackElementSize) );
}
jint saved_args_length() const;
jint saved_arg_offset(int arg) const;
// GC interface
oop* saved_target_addr() { return (oop*)register_addr(L2_saved_target); }
oop* saved_args_layout_addr() { return (oop*)register_addr(L3_saved_args_layout); }
oop compute_saved_args_layout(bool read_cache, bool write_cache);
#ifdef ASSERT
// The magic number is supposed to help find ricochet frames within the bytes of stack dumps.
enum { MAGIC_NUMBER_1 = 0xFEED03E, MAGIC_NUMBER_2 = 0xBEEF03E };
static const Register L0_magic_number_1 ; // cookie for debugging, at start of RSA
static Address magic_number_2_addr() { return Address(L4_saved_args_base, -wordSize); }
intptr_t magic_number_1() const { return register_value(L0_magic_number_1); }
intptr_t magic_number_2() const { return saved_args_base()[-1]; }
#endif //ASSERT
public:
enum { RETURN_VALUE_PLACEHOLDER = (NOT_DEBUG(0) DEBUG_ONLY(42)) };
void verify() const NOT_DEBUG_RETURN; // check for MAGIC_NUMBER, etc.
static void generate_ricochet_blob(MacroAssembler* _masm,
// output params:
int* bounce_offset,
int* exception_offset,
int* frame_size_in_words);
static void enter_ricochet_frame(MacroAssembler* _masm,
Register recv_reg,
Register argv_reg,
address return_handler);
static void leave_ricochet_frame(MacroAssembler* _masm,
Register recv_reg,
Register new_sp_reg,
Register sender_pc_reg);
static RicochetFrame* from_frame(const frame& fr) {
RicochetFrame* rf = new RicochetFrame(fr);
rf->verify();
return rf;
}
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
};
// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
static void load_conversion_vminfo(MacroAssembler* _masm, Address conversion_field_addr, Register reg);
static void extract_conversion_vminfo(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
static void extract_conversion_dest_type(MacroAssembler* _masm, Register conversion_field_reg, Register reg);
static void load_stack_move(MacroAssembler* _masm,
Address G3_amh_conversion,
Register G5_stack_move);
static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg);
static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
Register argslot_reg,
Register temp_reg, Register temp2_reg, Register temp3_reg);
static void push_arg_slots(MacroAssembler* _masm,
Register argslot_reg,
RegisterOrConstant slot_count,
Register temp_reg, Register temp2_reg);
static void move_arg_slots_up(MacroAssembler* _masm,
Register bottom_reg, // invariant
Address top_addr, // can use temp_reg
RegisterOrConstant positive_distance_in_slots,
Register temp_reg, Register temp2_reg);
static void move_arg_slots_down(MacroAssembler* _masm,
Address bottom_addr, // can use temp_reg
Register top_reg, // invariant
RegisterOrConstant negative_distance_in_slots,
Register temp_reg, Register temp2_reg);
static void move_typed_arg(MacroAssembler* _masm,
BasicType type, bool is_element,
Address value_src, Address slot_dest,
Register temp_reg);
static void move_return_value(MacroAssembler* _masm, BasicType type,
Address return_slot);
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
Register temp_reg,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_argslots(MacroAssembler* _masm,
RegisterOrConstant argslot_count,
Register argslot_reg,
Register temp_reg,
Register temp2_reg,
bool negate_argslot,
const char* error_message) NOT_DEBUG_RETURN;
static void verify_stack_move(MacroAssembler* _masm,
RegisterOrConstant arg_slots,
int direction) NOT_DEBUG_RETURN;
static void verify_klass(MacroAssembler* _masm,
Register obj_reg, KlassHandle klass,
Register temp_reg, Register temp2_reg,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
Register temp_reg, Register temp2_reg) {
verify_klass(_masm, mh_reg, SystemDictionaryHandles::MethodHandle_klass(),
temp_reg, temp2_reg,
"reference is a MH");
}
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, Register temp2);
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
// machine-dependent implemention for register maps // machine-dependent implemention for register maps
friend class frame; friend class frame;
friend class MethodHandles;
private: private:
intptr_t* _window; // register window save area (for L and I regs) intptr_t* _window; // register window save area (for L and I regs)

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,8 +42,6 @@
#define __ masm-> #define __ masm->
ExceptionBlob *OptoRuntime::_exception_blob;
//------------------------------ generate_exception_blob --------------------------- //------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end // creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method. // Using exception blob, this code is jumped from a compiled method.

View file

@ -47,18 +47,6 @@
#define __ masm-> #define __ masm->
#ifdef COMPILER2
UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2
DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub* SharedRuntime::_wrong_method_blob;
RuntimeStub* SharedRuntime::_ic_miss_blob;
RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;
class RegisterSaver { class RegisterSaver {
@ -3492,7 +3480,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP). // the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky... // Tricky, tricky, tricky...
static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) { SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
// allocate space for the code // allocate space for the code
@ -3587,7 +3575,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// but since this is generic code we don't know what they are and the caller // but since this is generic code we don't know what they are and the caller
// must do any gc of the args. // must do any gc of the args.
// //
static RuntimeStub* generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
// allocate space for the code // allocate space for the code
@ -3677,35 +3665,3 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
// frame_size_words or bytes?? // frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
} }
void SharedRuntime::generate_stubs() {
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
"wrong_method_stub");
_ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
"ic_miss_stub");
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
"resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
"resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
"resolve_static_call");
_polling_page_safepoint_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), false);
_polling_page_return_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), true);
generate_deopt_blob();
#ifdef COMPILER2
generate_uncommon_trap_blob();
#endif // COMPILER2
}

View file

@ -425,7 +425,7 @@ reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs. // but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
// This class is usable for mis-aligned loads as happen in I2C adapters. // This class is usable for mis-aligned loads as happen in I2C adapters.
reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31 ); R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
%} %}
//----------DEFINITION BLOCK--------------------------------------------------- //----------DEFINITION BLOCK---------------------------------------------------
@ -1326,7 +1326,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// -------------------------------------- // --------------------------------------
// Check for float->int copy; requires a trip through memory // Check for float->int copy; requires a trip through memory
if( src_first_rc == rc_float && dst_first_rc == rc_int ) { if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
int offset = frame::register_save_words*wordSize; int offset = frame::register_save_words*wordSize;
if (cbuf) { if (cbuf) {
emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
@ -1346,6 +1346,21 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
size += 16; size += 16;
} }
// Check for float->int copy on T4
if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
// Further check for aligned-adjacent pair, so we can use a double move
if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
}
// Check for int->float copy on T4
if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
// Further check for aligned-adjacent pair, so we can use a double move
if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
}
// -------------------------------------- // --------------------------------------
// In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
// In such cases, I have to do the big-endian swap. For aligned targets, the // In such cases, I have to do the big-endian swap. For aligned targets, the
@ -8164,215 +8179,58 @@ instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
ins_pipe( cadd_cmpltmask ); ins_pipe( cadd_cmpltmask );
%} %}
//----------Arithmetic Conversion Instructions---------------------------------
// The conversions operations are all Alpha sorted. Please keep it that way!
instruct convD2F_reg(regF dst, regD src) %{ //-----------------------------------------------------------------
match(Set dst (ConvD2F src)); // Direct raw moves between float and general registers using VIS3.
size(4);
format %{ "FDTOS $src,$dst" %} // ins_pipe(faddF_reg);
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
ins_encode(form3_opf_rs2D_rdF(src, dst)); predicate(UseVIS >= 3);
ins_pipe(fcvtD2F); match(Set dst (MoveF2I src));
format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
ins_encode %{
__ movstouw($src$$FloatRegister, $dst$$Register);
%} %}
// Convert a double to an int in a float register.
// If the double is a NAN, stuff a zero in instead.
instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FDTOI $src,$dst\t! convert in delay slot\n\t"
"FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
"FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_d2i_helper(src,dst));
ins_pipe(fcvtD2I);
%}
instruct convD2I_reg(stackSlotI dst, regD src) %{
match(Set dst (ConvD2I src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convD2I_helper(tmp, src);
regF_to_stkI(dst, tmp);
%}
%}
// Convert a double to a long in a double register.
// If the double is a NAN, stuff a zero in instead.
instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FDTOX $src,$dst\t! convert in delay slot\n\t"
"FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
"FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_d2l_helper(src,dst));
ins_pipe(fcvtD2L);
%}
// Double to Long conversion
instruct convD2L_reg(stackSlotL dst, regD src) %{
match(Set dst (ConvD2L src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convD2L_helper(tmp, src);
regD_to_stkL(dst, tmp);
%}
%}
instruct convF2D_reg(regD dst, regF src) %{
match(Set dst (ConvF2D src));
format %{ "FSTOD $src,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
ins_encode(form3_opf_rs2F_rdD(src, dst));
ins_pipe(fcvtF2D);
%}
instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FSTOI $src,$dst\t! convert in delay slot\n\t"
"FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
"FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_f2i_helper(src,dst));
ins_pipe(fcvtF2I);
%}
instruct convF2I_reg(stackSlotI dst, regF src) %{
match(Set dst (ConvF2I src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convF2I_helper(tmp, src);
regF_to_stkI(dst, tmp);
%}
%}
instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FSTOX $src,$dst\t! convert in delay slot\n\t"
"FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
"FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_f2l_helper(src,dst));
ins_pipe(fcvtF2L);
%}
// Float to Long conversion
instruct convF2L_reg(stackSlotL dst, regF src) %{
match(Set dst (ConvF2L src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convF2L_helper(tmp, src);
regD_to_stkL(dst, tmp);
%}
%}
instruct convI2D_helper(regD dst, regF tmp) %{
effect(USE tmp, DEF dst);
format %{ "FITOD $tmp,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
ins_encode(form3_opf_rs2F_rdD(tmp, dst));
ins_pipe(fcvtI2D);
%}
instruct convI2D_reg(stackSlotI src, regD dst) %{
match(Set dst (ConvI2D src));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
expand %{
regF tmp;
stkI_to_regF( tmp, src);
convI2D_helper( dst, tmp);
%}
%}
instruct convI2D_mem( regD_low dst, memory mem ) %{
match(Set dst (ConvI2D (LoadI mem)));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDF $mem,$dst\n\t"
"FITOD $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitod_opf);
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
instruct convI2F_helper(regF dst, regF tmp) %{
effect(DEF dst, USE tmp);
format %{ "FITOS $tmp,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
ins_encode(form3_opf_rs2F_rdF(tmp, dst));
ins_pipe(fcvtI2F);
%}
instruct convI2F_reg( regF dst, stackSlotI src ) %{
match(Set dst (ConvI2F src));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
expand %{
regF tmp;
stkI_to_regF(tmp,src);
convI2F_helper(dst, tmp);
%}
%}
instruct convI2F_mem( regF dst, memory mem ) %{
match(Set dst (ConvI2F (LoadI mem)));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDF $mem,$dst\n\t"
"FITOS $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitos_opf);
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
instruct convI2L_reg(iRegL dst, iRegI src) %{
match(Set dst (ConvI2L src));
size(4);
format %{ "SRA $src,0,$dst\t! int->long" %}
opcode(Assembler::sra_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
ins_pipe(ialu_reg_reg); ins_pipe(ialu_reg_reg);
%} %}
// Zero-extend convert int to long instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ predicate(UseVIS >= 3);
match(Set dst (AndL (ConvI2L src) mask) ); match(Set dst (MoveI2F src));
size(4);
format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode %{
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); __ movwtos($src$$Register, $dst$$FloatRegister);
%}
ins_pipe(ialu_reg_reg); ins_pipe(ialu_reg_reg);
%} %}
// Zero-extend long instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ predicate(UseVIS >= 3);
match(Set dst (AndL src mask) ); match(Set dst (MoveD2L src));
size(4);
format %{ "SRL $src,0,$dst\t! zero-extend long" %} format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
opcode(Assembler::srl_op3, Assembler::arith_op); ins_encode %{
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
%}
ins_pipe(ialu_reg_reg); ins_pipe(ialu_reg_reg);
%} %}
instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
predicate(UseVIS >= 3);
match(Set dst (MoveL2D src));
format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
ins_encode %{
__ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
// Raw moves between float and general registers using stack.
instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
match(Set dst (MoveF2I src)); match(Set dst (MoveF2I src));
effect(DEF dst, USE src); effect(DEF dst, USE src);
@ -8470,6 +8328,283 @@ instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
%} %}
//----------Arithmetic Conversion Instructions---------------------------------
// The conversions operations are all Alpha sorted. Please keep it that way!
instruct convD2F_reg(regF dst, regD src) %{
match(Set dst (ConvD2F src));
size(4);
format %{ "FDTOS $src,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
ins_encode(form3_opf_rs2D_rdF(src, dst));
ins_pipe(fcvtD2F);
%}
// Convert a double to an int in a float register.
// If the double is a NAN, stuff a zero in instead.
instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FDTOI $src,$dst\t! convert in delay slot\n\t"
"FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
"FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_d2i_helper(src,dst));
ins_pipe(fcvtD2I);
%}
instruct convD2I_stk(stackSlotI dst, regD src) %{
match(Set dst (ConvD2I src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convD2I_helper(tmp, src);
regF_to_stkI(dst, tmp);
%}
%}
instruct convD2I_reg(iRegI dst, regD src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvD2I src));
ins_cost(DEFAULT_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convD2I_helper(tmp, src);
MoveF2I_reg_reg(dst, tmp);
%}
%}
// Convert a double to a long in a double register.
// If the double is a NAN, stuff a zero in instead.
instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FDTOX $src,$dst\t! convert in delay slot\n\t"
"FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
"FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_d2l_helper(src,dst));
ins_pipe(fcvtD2L);
%}
instruct convD2L_stk(stackSlotL dst, regD src) %{
match(Set dst (ConvD2L src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convD2L_helper(tmp, src);
regD_to_stkL(dst, tmp);
%}
%}
instruct convD2L_reg(iRegL dst, regD src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvD2L src));
ins_cost(DEFAULT_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convD2L_helper(tmp, src);
MoveD2L_reg_reg(dst, tmp);
%}
%}
instruct convF2D_reg(regD dst, regF src) %{
match(Set dst (ConvF2D src));
format %{ "FSTOD $src,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
ins_encode(form3_opf_rs2F_rdD(src, dst));
ins_pipe(fcvtF2D);
%}
// Convert a float to an int in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FSTOI $src,$dst\t! convert in delay slot\n\t"
"FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
"FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_f2i_helper(src,dst));
ins_pipe(fcvtF2I);
%}
instruct convF2I_stk(stackSlotI dst, regF src) %{
match(Set dst (ConvF2I src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convF2I_helper(tmp, src);
regF_to_stkI(dst, tmp);
%}
%}
instruct convF2I_reg(iRegI dst, regF src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvF2I src));
ins_cost(DEFAULT_COST*2 + BRANCH_COST);
expand %{
regF tmp;
convF2I_helper(tmp, src);
MoveF2I_reg_reg(dst, tmp);
%}
%}
// Convert a float to a long in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
effect(DEF dst, USE src, KILL fcc0);
format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
"FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
"FSTOX $src,$dst\t! convert in delay slot\n\t"
"FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
"FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
"skip:" %}
ins_encode(form_f2l_helper(src,dst));
ins_pipe(fcvtF2L);
%}
instruct convF2L_stk(stackSlotL dst, regF src) %{
match(Set dst (ConvF2L src));
ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convF2L_helper(tmp, src);
regD_to_stkL(dst, tmp);
%}
%}
instruct convF2L_reg(iRegL dst, regF src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvF2L src));
ins_cost(DEFAULT_COST*2 + BRANCH_COST);
expand %{
regD tmp;
convF2L_helper(tmp, src);
MoveD2L_reg_reg(dst, tmp);
%}
%}
instruct convI2D_helper(regD dst, regF tmp) %{
effect(USE tmp, DEF dst);
format %{ "FITOD $tmp,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
ins_encode(form3_opf_rs2F_rdD(tmp, dst));
ins_pipe(fcvtI2D);
%}
instruct convI2D_stk(stackSlotI src, regD dst) %{
match(Set dst (ConvI2D src));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
expand %{
regF tmp;
stkI_to_regF(tmp, src);
convI2D_helper(dst, tmp);
%}
%}
instruct convI2D_reg(regD_low dst, iRegI src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvI2D src));
expand %{
regF tmp;
MoveI2F_reg_reg(tmp, src);
convI2D_helper(dst, tmp);
%}
%}
instruct convI2D_mem(regD_low dst, memory mem) %{
match(Set dst (ConvI2D (LoadI mem)));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDF $mem,$dst\n\t"
"FITOD $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitod_opf);
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
instruct convI2F_helper(regF dst, regF tmp) %{
effect(DEF dst, USE tmp);
format %{ "FITOS $tmp,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
ins_encode(form3_opf_rs2F_rdF(tmp, dst));
ins_pipe(fcvtI2F);
%}
instruct convI2F_stk(regF dst, stackSlotI src) %{
match(Set dst (ConvI2F src));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
expand %{
regF tmp;
stkI_to_regF(tmp,src);
convI2F_helper(dst, tmp);
%}
%}
instruct convI2F_reg(regF dst, iRegI src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvI2F src));
ins_cost(DEFAULT_COST);
expand %{
regF tmp;
MoveI2F_reg_reg(tmp, src);
convI2F_helper(dst, tmp);
%}
%}
instruct convI2F_mem( regF dst, memory mem ) %{
match(Set dst (ConvI2F (LoadI mem)));
ins_cost(DEFAULT_COST + MEMORY_REF_COST);
size(8);
format %{ "LDF $mem,$dst\n\t"
"FITOS $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitos_opf);
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
instruct convI2L_reg(iRegL dst, iRegI src) %{
match(Set dst (ConvI2L src));
size(4);
format %{ "SRA $src,0,$dst\t! int->long" %}
opcode(Assembler::sra_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
ins_pipe(ialu_reg_reg);
%}
// Zero-extend convert int to long
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
match(Set dst (AndL (ConvI2L src) mask) );
size(4);
format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
opcode(Assembler::srl_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
ins_pipe(ialu_reg_reg);
%}
// Zero-extend long
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
match(Set dst (AndL src mask) );
size(4);
format %{ "SRL $src,0,$dst\t! zero-extend long" %}
opcode(Assembler::srl_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
ins_pipe(ialu_reg_reg);
%}
//----------- //-----------
// Long to Double conversion using V8 opcodes. // Long to Double conversion using V8 opcodes.
// Still useful because cheetah traps and becomes // Still useful because cheetah traps and becomes
@ -8589,7 +8724,7 @@ instruct convL2D_helper(regD dst, regD tmp) %{
ins_pipe(fcvtL2D); ins_pipe(fcvtL2D);
%} %}
instruct convL2D_reg_fast_fxtof(regD dst, stackSlotL src) %{ instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
predicate(VM_Version::has_fast_fxtof()); predicate(VM_Version::has_fast_fxtof());
match(Set dst (ConvL2D src)); match(Set dst (ConvL2D src));
ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
@ -8600,10 +8735,15 @@ instruct convL2D_reg_fast_fxtof(regD dst, stackSlotL src) %{
%} %}
%} %}
//----------- instruct convL2D_reg(regD dst, iRegL src) %{
// Long to Float conversion using V8 opcodes. predicate(UseVIS >= 3);
// Still useful because cheetah traps and becomes match(Set dst (ConvL2D src));
// amazingly slow for some common numbers. expand %{
regD tmp;
MoveL2D_reg_reg(tmp, src);
convL2D_helper(dst, tmp);
%}
%}
// Long to Float conversion using fast fxtof // Long to Float conversion using fast fxtof
instruct convL2F_helper(regF dst, regD tmp) %{ instruct convL2F_helper(regF dst, regD tmp) %{
@ -8615,7 +8755,7 @@ instruct convL2F_helper(regF dst, regD tmp) %{
ins_pipe(fcvtL2F); ins_pipe(fcvtL2F);
%} %}
instruct convL2F_reg_fast_fxtof(regF dst, stackSlotL src) %{ instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
match(Set dst (ConvL2F src)); match(Set dst (ConvL2F src));
ins_cost(DEFAULT_COST + MEMORY_REF_COST); ins_cost(DEFAULT_COST + MEMORY_REF_COST);
expand %{ expand %{
@ -8624,6 +8764,18 @@ instruct convL2F_reg_fast_fxtof(regF dst, stackSlotL src) %{
convL2F_helper(dst, tmp); convL2F_helper(dst, tmp);
%} %}
%} %}
instruct convL2F_reg(regF dst, iRegL src) %{
predicate(UseVIS >= 3);
match(Set dst (ConvL2F src));
ins_cost(DEFAULT_COST);
expand %{
regD tmp;
MoveL2D_reg_reg(tmp, src);
convL2F_helper(dst, tmp);
%}
%}
//----------- //-----------
instruct convL2I_reg(iRegI dst, iRegL src) %{ instruct convL2I_reg(iRegI dst, iRegL src) %{

View file

@ -440,7 +440,8 @@ class StubGenerator: public StubCodeGenerator {
#undef __ #undef __
#define __ masm-> #define __ masm->
address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) { address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
Register arg1 = noreg, Register arg2 = noreg) {
#ifdef ASSERT #ifdef ASSERT
int insts_size = VerifyThread ? 1 * K : 600; int insts_size = VerifyThread ? 1 * K : 600;
#else #else
@ -476,6 +477,13 @@ class StubGenerator: public StubCodeGenerator {
__ set_last_Java_frame(last_java_sp, G0); __ set_last_Java_frame(last_java_sp, G0);
if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early
__ save_thread(noreg); __ save_thread(noreg);
if (arg1 != noreg) {
assert(arg2 != O1, "clobbered");
__ mov(arg1, O1);
}
if (arg2 != noreg) {
__ mov(arg2, O2);
}
// do the call // do the call
BLOCK_COMMENT("call runtime_entry"); BLOCK_COMMENT("call runtime_entry");
__ call(runtime_entry, relocInfo::runtime_call_type); __ call(runtime_entry, relocInfo::runtime_call_type);
@ -3240,6 +3248,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry; StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
#endif // COMPILER2 !=> _LP64 #endif // COMPILER2 !=> _LP64
// Build this early so it's available for the interpreter. The
// stub expects the required and actual type to already be in O1
// and O2 respectively.
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
false, G5_method_type, G3_method_handle);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,11 +44,6 @@ enum /* platform_dependent_constants */ {
code_size2 = 20000 // simply increase if too small (assembler will crash if too small) code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
}; };
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 15000
};
class Sparc { class Sparc {
friend class StubGenerator; friend class StubGenerator;

View file

@ -128,24 +128,6 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
} }
// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
address entry = __ pc();
// expression stack must be empty before entering the VM if an exception
// happened
__ empty_expression_stack();
// load exception object
__ call_VM(Oexception,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_WrongMethodTypeException),
G5_method_type, // required
G3_method_handle); // actual
__ should_not_reach_here();
return entry;
}
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
address entry = __ pc(); address entry = __ pc();
// expression stack must be empty before entering the VM if an exception happened // expression stack must be empty before entering the VM if an exception happened
@ -1712,7 +1694,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0; int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
*interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS; *interpreter_frame->register_addr(I5_savedSP) = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
} else { } else {
assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases"); assert(caller->is_compiled_frame() || caller->is_entry_frame() || caller->is_ricochet_frame(), "only possible cases");
// Don't have Lesp available; lay out locals block in the caller // Don't have Lesp available; lay out locals block in the caller
// adjacent to the register window save area. // adjacent to the register window save area.
// //

View file

@ -266,7 +266,7 @@ void TemplateTable::sipush() {
void TemplateTable::ldc(bool wide) { void TemplateTable::ldc(bool wide) {
transition(vtos, vtos); transition(vtos, vtos);
Label call_ldc, notInt, notString, notClass, exit; Label call_ldc, notInt, isString, notString, notClass, exit;
if (wide) { if (wide) {
__ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned); __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
@ -317,8 +317,11 @@ void TemplateTable::ldc(bool wide) {
__ bind(notInt); __ bind(notInt);
// __ cmp(O2, JVM_CONSTANT_String); // __ cmp(O2, JVM_CONSTANT_String);
__ brx(Assembler::equal, true, Assembler::pt, isString);
__ delayed()->cmp(O2, JVM_CONSTANT_Object);
__ brx(Assembler::notEqual, true, Assembler::pt, notString); __ brx(Assembler::notEqual, true, Assembler::pt, notString);
__ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f); __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
__ bind(isString);
__ ld_ptr(O0, O1, Otos_i); __ ld_ptr(O0, O1, Otos_i);
__ verify_oop(Otos_i); __ verify_oop(Otos_i);
__ push(atos); __ push(atos);

View file

@ -144,6 +144,18 @@ void VM_Version::initialize() {
// buf is started with ", " or is empty // buf is started with ", " or is empty
_features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf); _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
// UseVIS is set to the smallest of what hardware supports and what
// the command line requires. I.e., you cannot set UseVIS to 3 on
// older UltraSparc which do not support it.
if (UseVIS > 3) UseVIS=3;
if (UseVIS < 0) UseVIS=0;
if (!has_vis3()) // Drop to 2 if no VIS3 support
UseVIS = MIN2((intx)2,UseVIS);
if (!has_vis2()) // Drop to 1 if no VIS2 support
UseVIS = MIN2((intx)1,UseVIS);
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
#ifndef PRODUCT #ifndef PRODUCT
if (PrintMiscellaneous && Verbose) { if (PrintMiscellaneous && Verbose) {
tty->print("Allocation: "); tty->print("Allocation: ");

View file

@ -3804,6 +3804,14 @@ void Assembler::addq(Register dst, Register src) {
emit_arith(0x03, 0xC0, dst, src); emit_arith(0x03, 0xC0, dst, src);
} }
void Assembler::andq(Address dst, int32_t imm32) {
InstructionMark im(this);
prefixq(dst);
emit_byte(0x81);
emit_operand(rsp, dst, 4);
emit_long(imm32);
}
void Assembler::andq(Register dst, int32_t imm32) { void Assembler::andq(Register dst, int32_t imm32) {
(void) prefixq_and_encode(dst->encoding()); (void) prefixq_and_encode(dst->encoding());
emit_arith(0x81, 0xE0, dst, imm32); emit_arith(0x81, 0xE0, dst, imm32);
@ -5090,7 +5098,7 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
} else { } else {
ttyLocker ttyl; ttyLocker ttyl;
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
assert(false, "DEBUG MESSAGE"); assert(false, err_msg("DEBUG MESSAGE: %s", msg));
} }
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
} }
@ -5653,6 +5661,7 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
ttyLocker ttyl; ttyLocker ttyl;
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
msg); msg);
assert(false, err_msg("DEBUG MESSAGE: %s", msg));
} }
} }
@ -5890,6 +5899,53 @@ void MacroAssembler::call_VM(Register oop_result,
call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
} }
void MacroAssembler::super_call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions) {
Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}
void MacroAssembler::super_call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
Register arg_1,
bool check_exceptions) {
pass_arg1(this, arg_1);
super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}
void MacroAssembler::super_call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
Register arg_1,
Register arg_2,
bool check_exceptions) {
LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
pass_arg2(this, arg_2);
pass_arg1(this, arg_1);
super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
void MacroAssembler::super_call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
Register arg_1,
Register arg_2,
Register arg_3,
bool check_exceptions) {
LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
pass_arg3(this, arg_3);
LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
pass_arg2(this, arg_2);
pass_arg1(this, arg_1);
super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
void MacroAssembler::call_VM_base(Register oop_result, void MacroAssembler::call_VM_base(Register oop_result,
Register java_thread, Register java_thread,
Register last_java_sp, Register last_java_sp,

View file

@ -779,6 +779,7 @@ private:
void andl(Register dst, Address src); void andl(Register dst, Address src);
void andl(Register dst, Register src); void andl(Register dst, Register src);
void andq(Address dst, int32_t imm32);
void andq(Register dst, int32_t imm32); void andq(Register dst, int32_t imm32);
void andq(Register dst, Address src); void andq(Register dst, Address src);
void andq(Register dst, Register src); void andq(Register dst, Register src);
@ -1660,6 +1661,14 @@ class MacroAssembler: public Assembler {
Register arg_1, Register arg_2, Register arg_3, Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions = true); bool check_exceptions = true);
// These always tightly bind to MacroAssembler::call_VM_base
// bypassing the virtual implementation
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
void call_VM_leaf(address entry_point, void call_VM_leaf(address entry_point,
int number_of_arguments = 0); int number_of_arguments = 0);
void call_VM_leaf(address entry_point, void call_VM_leaf(address entry_point,

View file

@ -47,7 +47,7 @@
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
// Use the expression (adr)&(~0xF) to provide 128-bits aligned address // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
// of 128-bits operands for SSE instructions. // of 128-bits operands for SSE instructions.
jlong *operand = (jlong*)(((long)adr)&((long)(~0xF))); jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
// Store the value to a 128-bits operand. // Store the value to a 128-bits operand.
operand[0] = lo; operand[0] = lo;
operand[1] = hi; operand[1] = hi;
@ -3113,7 +3113,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// reload the register args properly if we go slow path. Yuck // reload the register args properly if we go slow path. Yuck
// These are proper for the calling convention // These are proper for the calling convention
store_parameter(length, 2); store_parameter(length, 2);
store_parameter(dst_pos, 1); store_parameter(dst_pos, 1);
store_parameter(dst, 0); store_parameter(dst, 0);
@ -3351,12 +3350,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ jcc(Assembler::notEqual, *stub->entry()); __ jcc(Assembler::notEqual, *stub->entry());
} }
#ifndef _LP64 // Spill because stubs can use any register they like and it's
// save caller save registers // easier to restore just those that we care about.
store_parameter(rax, 2); store_parameter(dst, 0);
store_parameter(rcx, 1); store_parameter(dst_pos, 1);
store_parameter(rdx, 0); store_parameter(length, 2);
store_parameter(src_pos, 3);
store_parameter(src, 4);
#ifndef _LP64
__ movptr(tmp, dst_klass_addr); __ movptr(tmp, dst_klass_addr);
__ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
__ push(tmp); __ push(tmp);
@ -3372,17 +3374,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#else #else
__ movl2ptr(length, length); //higher 32bits must be null __ movl2ptr(length, length); //higher 32bits must be null
// save caller save registers: copy them to callee save registers
__ mov(rbx, rdx);
__ mov(r13, r8);
__ mov(r14, r9);
#ifndef _WIN64
store_parameter(rsi, 1);
store_parameter(rcx, 0);
// on WIN64 other incoming parameters are in rdi and rsi saved
// across the call
#endif
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg0, dst, dst_pos, length); assert_different_registers(c_rarg0, dst, dst_pos, length);
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
@ -3432,25 +3423,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
__ xorl(tmp, -1); __ xorl(tmp, -1);
#ifndef _LP64 // Restore previously spilled arguments
// restore caller save registers __ movptr (dst, Address(rsp, 0*BytesPerWord));
assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
__ movptr(rdx, Address(rsp, 0*BytesPerWord)); __ movptr (length, Address(rsp, 2*BytesPerWord));
__ movptr(rcx, Address(rsp, 1*BytesPerWord)); __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
__ movptr(rax, Address(rsp, 2*BytesPerWord)); __ movptr (src, Address(rsp, 4*BytesPerWord));
#else
// restore caller save registers
__ mov(rdx, rbx);
__ mov(r8, r13);
__ mov(r9, r14);
#ifndef _WIN64
assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost
__ movptr(rcx, Address(rsp, 0*BytesPerWord));
__ movptr(rsi, Address(rsp, 1*BytesPerWord));
#else
assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost
#endif
#endif
__ subl(length, tmp); __ subl(length, tmp);
__ addl(src_pos, tmp); __ addl(src_pos, tmp);

View file

@ -45,6 +45,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
_pc = pc; _pc = pc;
assert(pc != NULL, "no pc?"); assert(pc != NULL, "no pc?");
_cb = CodeCache::find_blob(pc); _cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this); address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != NULL) {
@ -92,6 +93,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
// assert(_pc != NULL, "no pc?"); // assert(_pc != NULL, "no pc?");
_cb = CodeCache::find_blob(_pc); _cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this); address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) { if (original_pc != NULL) {

View file

@ -43,8 +43,8 @@ class ICache : public AbstractICache {
#ifdef AMD64 #ifdef AMD64
enum { enum {
stub_size = 64, // Size of the icache flush stub in bytes stub_size = 64, // Size of the icache flush stub in bytes
line_size = 32, // Icache line size in bytes line_size = 64, // Icache line size in bytes
log2_line_size = 5 // log2(line_size) log2_line_size = 6 // log2(line_size)
}; };
// Use default implementation // Use default implementation

View file

@ -403,9 +403,9 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only_mode if these events CAN be enabled. // interp_only_mode if these events CAN be enabled.
get_thread(temp); get_thread(temp);
// interp_only is an int, on little endian it is sufficient to test the byte only // interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster (ce // Is a cmpl faster?
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0); cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
jcc(Assembler::zero, run_compiled_code); jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset())); jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code); bind(run_compiled_code);
} }

View file

@ -402,7 +402,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only is an int, on little endian it is sufficient to test the byte only // interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster? // Is a cmpl faster?
cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0); cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
jcc(Assembler::zero, run_compiled_code); jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, methodOopDesc::interpreter_entry_offset())); jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
bind(run_compiled_code); bind(run_compiled_code);
} }

View file

@ -24,6 +24,7 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp" #include "prims/methodHandles.hpp"
@ -37,6 +38,11 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm, address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
address interpreted_entry) { address interpreted_entry) {
// Just before the actual machine code entry point, allocate space // Just before the actual machine code entry point, allocate space
@ -139,9 +145,9 @@ oop MethodHandles::RicochetFrame::compute_saved_args_layout(bool read_cache, boo
void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm, void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
// output params: // output params:
int* frame_size_in_words,
int* bounce_offset, int* bounce_offset,
int* exception_offset) { int* exception_offset,
int* frame_size_in_words) {
(*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize; (*frame_size_in_words) = RicochetFrame::frame_size_in_bytes() / wordSize;
address start = __ pc(); address start = __ pc();
@ -366,7 +372,7 @@ void MethodHandles::load_stack_move(MacroAssembler* _masm,
Register rdi_stack_move, Register rdi_stack_move,
Register rcx_amh, Register rcx_amh,
bool might_be_negative) { bool might_be_negative) {
BLOCK_COMMENT("load_stack_move"); BLOCK_COMMENT("load_stack_move {");
Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); Address rcx_amh_conversion(rcx_amh, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
__ movl(rdi_stack_move, rcx_amh_conversion); __ movl(rdi_stack_move, rcx_amh_conversion);
__ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
@ -387,9 +393,10 @@ void MethodHandles::load_stack_move(MacroAssembler* _masm,
__ stop("load_stack_move of garbage value"); __ stop("load_stack_move of garbage value");
__ BIND(L_ok); __ BIND(L_ok);
} }
BLOCK_COMMENT("} load_stack_move");
} }
#ifndef PRODUCT #ifdef ASSERT
void MethodHandles::RicochetFrame::verify_offsets() { void MethodHandles::RicochetFrame::verify_offsets() {
// Check compatibility of this struct with the more generally used offsets of class frame: // Check compatibility of this struct with the more generally used offsets of class frame:
int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value int ebp_off = sender_link_offset_in_bytes(); // offset from struct base to local rbp value
@ -539,6 +546,28 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
} }
#endif //ASSERT #endif //ASSERT
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp) {
if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code;
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
#ifdef _LP64
Register rthread = r15_thread;
#else
Register rthread = temp;
__ get_thread(rthread);
#endif
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
__ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
__ jccb(Assembler::zero, run_compiled_code);
__ jmp(Address(method, methodOopDesc::interpreter_entry_offset()));
__ bind(run_compiled_code);
}
__ jmp(Address(method, methodOopDesc::from_interpreted_offset()));
}
// Code generation // Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) { address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
// rbx: methodOop // rbx: methodOop
@ -555,13 +584,11 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// emit WrongMethodType path first, to enable jccb back-branch from main path // emit WrongMethodType path first, to enable jccb back-branch from main path
Label wrong_method_type; Label wrong_method_type;
__ bind(wrong_method_type); __ bind(wrong_method_type);
Label invoke_generic_slow_path; Label invoke_generic_slow_path, invoke_exact_error_path;
assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");; assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
__ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact); __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
__ jcc(Assembler::notEqual, invoke_generic_slow_path); __ jcc(Assembler::notEqual, invoke_generic_slow_path);
__ push(rax_mtype); // required mtype __ jmp(invoke_exact_error_path);
__ push(rcx_recv); // bad mh (1st stacked argument)
__ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
// here's where control starts out: // here's where control starts out:
__ align(CodeEntryAlignment); __ align(CodeEntryAlignment);
@ -595,6 +622,11 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
__ jump_to_method_handle_entry(rcx_recv, rdi_temp); __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
// error path for invokeExact (only)
__ bind(invoke_exact_error_path);
// Stub wants expected type in rax and the actual type in rcx
__ jump(ExternalAddress(StubRoutines::throw_WrongMethodTypeException_entry()));
// for invokeGeneric (only), apply argument and result conversions on the fly // for invokeGeneric (only), apply argument and result conversions on the fly
__ bind(invoke_generic_slow_path); __ bind(invoke_generic_slow_path);
#ifdef ASSERT #ifdef ASSERT
@ -632,11 +664,6 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
return entry_point; return entry_point;
} }
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
// Helper to insert argument slots into the stack. // Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and < 0 // arg_slots must be a multiple of stack_move_unit() and < 0
// rax_argslot is decremented to point to the new (shifted) location of the argslot // rax_argslot is decremented to point to the new (shifted) location of the argslot
@ -1115,9 +1142,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// some handy addresses // some handy addresses
Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() );
Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() ); Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
@ -1147,7 +1171,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
trace_method_handle(_masm, entry_name(ek)); trace_method_handle(_masm, entry_name(ek));
BLOCK_COMMENT(entry_name(ek)); BLOCK_COMMENT(err_msg("Entry %s {", entry_name(ek)));
switch ((int) ek) { switch ((int) ek) {
case _raise_exception: case _raise_exception:
@ -1158,32 +1182,24 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
assert(raise_exception_method(), "must be set"); assert(raise_exception_method(), "must be set");
assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); assert(raise_exception_method()->from_compiled_entry(), "method must be linked");
const Register rdi_pc = rax; const Register rax_pc = rax;
__ pop(rdi_pc); // caller PC __ pop(rax_pc); // caller PC
__ mov(rsp, saved_last_sp); // cut the stack back to where the caller started __ mov(rsp, saved_last_sp); // cut the stack back to where the caller started
Register rbx_method = rbx_temp; Register rbx_method = rbx_temp;
Label L_no_method;
// FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
__ jccb(Assembler::zero, L_no_method);
const int jobject_oop_offset = 0; const int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
__ testptr(rbx_method, rbx_method);
__ jccb(Assembler::zero, L_no_method);
__ verify_oop(rbx_method);
NOT_LP64(__ push(rarg2_required)); __ movptr(rsi, rsp);
__ push(rdi_pc); // restore caller PC __ subptr(rsp, 3 * wordSize);
__ jmp(rbx_method_fce); // jump to compiled entry __ push(rax_pc); // restore caller PC
// Do something that is at least causes a valid throw from the interpreter. __ movptr(__ argument_address(constant(2)), rarg0_code);
__ bind(L_no_method); __ movptr(__ argument_address(constant(1)), rarg1_actual);
__ push(rarg2_required); __ movptr(__ argument_address(constant(0)), rarg2_required);
__ push(rarg1_actual); jump_from_method_handle(_masm, rbx_method, rax);
__ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
} }
break; break;
@ -1202,7 +1218,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ null_check(rcx_recv); __ null_check(rcx_recv);
__ verify_oop(rcx_recv); __ verify_oop(rcx_recv);
} }
__ jmp(rbx_method_fie); jump_from_method_handle(_masm, rbx_method, rax);
} }
break; break;
@ -1235,7 +1251,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_method, vtable_entry_addr); __ movptr(rbx_method, vtable_entry_addr);
__ verify_oop(rbx_method); __ verify_oop(rbx_method);
__ jmp(rbx_method_fie); jump_from_method_handle(_masm, rbx_method, rax);
} }
break; break;
@ -1270,7 +1286,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
no_such_interface); no_such_interface);
__ verify_oop(rbx_method); __ verify_oop(rbx_method);
__ jmp(rbx_method_fie); jump_from_method_handle(_masm, rbx_method, rax);
__ hlt(); __ hlt();
__ bind(no_such_interface); __ bind(no_such_interface);
@ -1292,7 +1308,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _bound_int_direct_mh: case _bound_int_direct_mh:
case _bound_long_direct_mh: case _bound_long_direct_mh:
{ {
bool direct_to_method = (ek >= _bound_ref_direct_mh); const bool direct_to_method = (ek >= _bound_ref_direct_mh);
BasicType arg_type = ek_bound_mh_arg_type(ek); BasicType arg_type = ek_bound_mh_arg_type(ek);
int arg_slots = type2size[arg_type]; int arg_slots = type2size[arg_type];
@ -1318,7 +1334,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register rbx_method = rbx_temp; Register rbx_method = rbx_temp;
__ load_heap_oop(rbx_method, rcx_mh_vmtarget); __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
__ verify_oop(rbx_method); __ verify_oop(rbx_method);
__ jmp(rbx_method_fie); jump_from_method_handle(_masm, rbx_method, rax);
} else { } else {
__ load_heap_oop(rcx_recv, rcx_mh_vmtarget); __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
__ verify_oop(rcx_recv); __ verify_oop(rcx_recv);
@ -1632,14 +1648,16 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// rax = src_addr + swap_bytes // rax = src_addr + swap_bytes
// rbx = dest_addr // rbx = dest_addr
// while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++; // while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
__ addptr(rbx_destslot, wordSize); // dest_slot denotes an exclusive upper limit
int limit_bias = OP_ROT_ARGS_DOWN_LIMIT_BIAS;
if (limit_bias != 0)
__ addptr(rbx_destslot, - limit_bias * wordSize);
move_arg_slots_down(_masm, move_arg_slots_down(_masm,
Address(rax_argslot, swap_slots * wordSize), Address(rax_argslot, swap_slots * wordSize),
rbx_destslot, rbx_destslot,
-swap_slots, -swap_slots,
rax_argslot, rdx_temp); rax_argslot, rdx_temp);
__ subptr(rbx_destslot, swap_slots * wordSize);
__ subptr(rbx_destslot, wordSize);
} }
// pop the original first chunk into the destination slot, now free // pop the original first chunk into the destination slot, now free
for (int i = 0; i < swap_slots; i++) { for (int i = 0; i < swap_slots; i++) {
@ -1929,7 +1947,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// In the non-retaining case, this might move keep2 either up or down. // In the non-retaining case, this might move keep2 either up or down.
// We don't have to copy the whole | RF... collect | complex, // We don't have to copy the whole | RF... collect | complex,
// but we must adjust RF.saved_args_base. // but we must adjust RF.saved_args_base.
// Also, from now on, we will forget about the origial copy of |collect|. // Also, from now on, we will forget about the original copy of |collect|.
// If we are retaining it, we will treat it as part of |keep2|. // If we are retaining it, we will treat it as part of |keep2|.
// For clarity we will define |keep3| = |collect|keep2| or |keep2|. // For clarity we will define |keep3| = |collect|keep2| or |keep2|.
@ -1986,7 +2004,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Net shift (&new_argv - &old_argv) is (close_count - open_count). // Net shift (&new_argv - &old_argv) is (close_count - open_count).
bool zero_open_count = (open_count == 0); // remember this bit of info bool zero_open_count = (open_count == 0); // remember this bit of info
if (move_keep3 && fix_arg_base) { if (move_keep3 && fix_arg_base) {
// It will be easier t have everything in one register: // It will be easier to have everything in one register:
if (close_count.is_register()) { if (close_count.is_register()) {
// Deduct open_count from close_count register to get a clean +/- value. // Deduct open_count from close_count register to get a clean +/- value.
__ subptr(close_count.as_register(), open_count); __ subptr(close_count.as_register(), open_count);
@ -2396,6 +2414,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ nop(); __ nop();
return; return;
} }
BLOCK_COMMENT(err_msg("} Entry %s", entry_name(ek)));
__ hlt(); __ hlt();
address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry); address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);

View file

@ -25,6 +25,11 @@
// Platform-specific definitions for method handles. // Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles. // These definitions are inlined into class MethodHandles.
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(30000 DEBUG_ONLY(+ 10000)) LP64_ONLY(80000 DEBUG_ONLY(+ 120000))
};
public: public:
// The stack just after the recursive call from a ricochet frame // The stack just after the recursive call from a ricochet frame
@ -188,7 +193,9 @@ class RicochetFrame {
static void generate_ricochet_blob(MacroAssembler* _masm, static void generate_ricochet_blob(MacroAssembler* _masm,
// output params: // output params:
int* frame_size_in_words, int* bounce_offset, int* exception_offset); int* bounce_offset,
int* exception_offset,
int* frame_size_in_words);
static void enter_ricochet_frame(MacroAssembler* _masm, static void enter_ricochet_frame(MacroAssembler* _masm,
Register rcx_recv, Register rcx_recv,
@ -284,6 +291,10 @@ public:
"reference is a MH"); "reference is a MH");
} }
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp);
static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
static Register saved_last_sp_register() { static Register saved_last_sp_register() {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,8 +42,6 @@
#define __ masm-> #define __ masm->
ExceptionBlob* OptoRuntime::_exception_blob;
//------------------------------generate_exception_blob--------------------------- //------------------------------generate_exception_blob---------------------------
// creates exception blob at the end // creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method. // Using exception blob, this code is jumped from a compiled method.

View file

@ -42,18 +42,6 @@
#endif #endif
#define __ masm-> #define __ masm->
#ifdef COMPILER2
UncommonTrapBlob *SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2
DeoptimizationBlob *SharedRuntime::_deopt_blob;
SafepointBlob *SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob *SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub* SharedRuntime::_wrong_method_blob;
RuntimeStub* SharedRuntime::_ic_miss_blob;
RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
@ -2253,31 +2241,6 @@ uint SharedRuntime::out_preserve_stack_slots() {
return 0; return 0;
} }
//----------------------------generate_ricochet_blob---------------------------
void SharedRuntime::generate_ricochet_blob() {
if (!EnableInvokeDynamic) return; // leave it as a null
// allocate space for the code
ResourceMark rm;
// setup code generation tools
CodeBuffer buffer("ricochet_blob", 256, 256);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
// -------------
// make sure all code is generated
masm->flush();
// failed to generate?
if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
assert(false, "bad ricochet blob");
return;
}
_ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
}
//------------------------------generate_deopt_blob---------------------------- //------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() { void SharedRuntime::generate_deopt_blob() {
@ -2816,7 +2779,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// setup oopmap, and calls safepoint code to stop the compiled code for // setup oopmap, and calls safepoint code to stop the compiled code for
// a safepoint. // a safepoint.
// //
static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) { SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
// Account for thread arg in our frame // Account for thread arg in our frame
const int additional_words = 1; const int additional_words = 1;
@ -2913,7 +2876,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// but since this is generic code we don't know what they are and the caller // but since this is generic code we don't know what they are and the caller
// must do any gc of the args. // must do any gc of the args.
// //
static RuntimeStub* generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
// allocate space for the code // allocate space for the code
@ -2995,36 +2958,3 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
// frame_size_words or bytes?? // frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
} }
void SharedRuntime::generate_stubs() {
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
"wrong_method_stub");
_ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
"ic_miss_stub");
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
"resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
"resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
"resolve_static_call");
_polling_page_safepoint_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), false);
_polling_page_return_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), true);
generate_ricochet_blob();
generate_deopt_blob();
#ifdef COMPILER2
generate_uncommon_trap_blob();
#endif // COMPILER2
}

View file

@ -41,24 +41,10 @@
#include "opto/runtime.hpp" #include "opto/runtime.hpp"
#endif #endif
DeoptimizationBlob *SharedRuntime::_deopt_blob; #define __ masm->
#ifdef COMPILER2
UncommonTrapBlob *SharedRuntime::_uncommon_trap_blob;
ExceptionBlob *OptoRuntime::_exception_blob;
#endif // COMPILER2
SafepointBlob *SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob *SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub* SharedRuntime::_wrong_method_blob;
RuntimeStub* SharedRuntime::_ic_miss_blob;
RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
#define __ masm->
class SimpleRuntimeFrame { class SimpleRuntimeFrame {
public: public:
@ -2530,32 +2516,6 @@ uint SharedRuntime::out_preserve_stack_slots() {
} }
//----------------------------generate_ricochet_blob---------------------------
void SharedRuntime::generate_ricochet_blob() {
if (!EnableInvokeDynamic) return; // leave it as a null
// allocate space for the code
ResourceMark rm;
// setup code generation tools
CodeBuffer buffer("ricochet_blob", 512, 512);
MacroAssembler* masm = new MacroAssembler(&buffer);
int frame_size_in_words = -1, bounce_offset = -1, exception_offset = -1;
MethodHandles::RicochetFrame::generate_ricochet_blob(masm, &frame_size_in_words, &bounce_offset, &exception_offset);
// -------------
// make sure all code is generated
masm->flush();
// failed to generate?
if (frame_size_in_words < 0 || bounce_offset < 0 || exception_offset < 0) {
assert(false, "bad ricochet blob");
return;
}
_ricochet_blob = RicochetBlob::create(&buffer, bounce_offset, exception_offset, frame_size_in_words);
}
//------------------------------generate_deopt_blob---------------------------- //------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() { void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code // Allocate space for the code
@ -3046,7 +3006,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Generate a special Compile2Runtime blob that saves all registers, // Generate a special Compile2Runtime blob that saves all registers,
// and setup oopmap. // and setup oopmap.
// //
static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) { SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
assert(StubRoutines::forward_exception_entry() != NULL, assert(StubRoutines::forward_exception_entry() != NULL,
"must be generated before"); "must be generated before");
@ -3132,7 +3092,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// but since this is generic code we don't know what they are and the caller // but since this is generic code we don't know what they are and the caller
// must do any gc of the args. // must do any gc of the args.
// //
static RuntimeStub* generate_resolve_blob(address destination, const char* name) { RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
// allocate space for the code // allocate space for the code
@ -3209,38 +3169,6 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
} }
void SharedRuntime::generate_stubs() {
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
"wrong_method_stub");
_ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
"ic_miss_stub");
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
"resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
"resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
"resolve_static_call");
_polling_page_safepoint_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), false);
_polling_page_return_handler_blob =
generate_handler_blob(CAST_FROM_FN_PTR(address,
SafepointSynchronize::handle_polling_page_exception), true);
generate_ricochet_blob();
generate_deopt_blob();
#ifdef COMPILER2
generate_uncommon_trap_blob();
#endif // COMPILER2
}
#ifdef COMPILER2 #ifdef COMPILER2
// This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
// //

View file

@ -2151,6 +2151,8 @@ class StubGenerator: public StubCodeGenerator {
// if they expect all registers to be preserved. // if they expect all registers to be preserved.
enum layout { enum layout {
thread_off, // last_java_sp thread_off, // last_java_sp
arg1_off,
arg2_off,
rbp_off, // callee saved register rbp_off, // callee saved register
ret_pc, ret_pc,
framesize framesize
@ -2185,7 +2187,7 @@ class StubGenerator: public StubCodeGenerator {
// either at call sites or otherwise assume that stack unwinding will be initiated, // either at call sites or otherwise assume that stack unwinding will be initiated,
// so caller saved registers were assumed volatile in the compiler. // so caller saved registers were assumed volatile in the compiler.
address generate_throw_exception(const char* name, address runtime_entry, address generate_throw_exception(const char* name, address runtime_entry,
bool restore_saved_exception_pc) { bool restore_saved_exception_pc, Register arg1 = noreg, Register arg2 = noreg) {
int insts_size = 256; int insts_size = 256;
int locs_size = 32; int locs_size = 32;
@ -2218,6 +2220,13 @@ class StubGenerator: public StubCodeGenerator {
// push java thread (becomes first argument of C function) // push java thread (becomes first argument of C function)
__ movptr(Address(rsp, thread_off * wordSize), java_thread); __ movptr(Address(rsp, thread_off * wordSize), java_thread);
if (arg1 != noreg) {
__ movptr(Address(rsp, arg1_off * wordSize), arg1);
}
if (arg2 != noreg) {
assert(arg1 != noreg, "missing reg arg");
__ movptr(Address(rsp, arg2_off * wordSize), arg2);
}
// Set up last_Java_sp and last_Java_fp // Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, NULL); __ set_last_Java_frame(java_thread, rsp, rbp, NULL);
@ -2309,6 +2318,12 @@ class StubGenerator: public StubCodeGenerator {
CAST_FROM_FN_PTR(address, SharedRuntime::d2i)); CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG, StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
// Build this early so it's available for the interpreter
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
false, rax, rcx);
} }

View file

@ -2934,7 +2934,9 @@ class StubGenerator: public StubCodeGenerator {
// caller saved registers were assumed volatile in the compiler. // caller saved registers were assumed volatile in the compiler.
address generate_throw_exception(const char* name, address generate_throw_exception(const char* name,
address runtime_entry, address runtime_entry,
bool restore_saved_exception_pc) { bool restore_saved_exception_pc,
Register arg1 = noreg,
Register arg2 = noreg) {
// Information about frame layout at time of blocking runtime call. // Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since // Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point // the compilers are responsible for supplying a continuation point
@ -2980,6 +2982,13 @@ class StubGenerator: public StubCodeGenerator {
__ set_last_Java_frame(rsp, rbp, NULL); __ set_last_Java_frame(rsp, rbp, NULL);
// Call runtime // Call runtime
if (arg1 != noreg) {
assert(arg2 != c_rarg1, "clobbered");
__ movptr(c_rarg1, arg1);
}
if (arg2 != noreg) {
__ movptr(c_rarg2, arg2);
}
__ movptr(c_rarg0, r15_thread); __ movptr(c_rarg0, r15_thread);
BLOCK_COMMENT("call runtime_entry"); BLOCK_COMMENT("call runtime_entry");
__ call(RuntimeAddress(runtime_entry)); __ call(RuntimeAddress(runtime_entry));
@ -3052,6 +3061,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
// Build this early so it's available for the interpreter. Stub
// expects the required and actual types as register arguments in
// j_rarg0 and j_rarg1 respectively.
StubRoutines::_throw_WrongMethodTypeException_entry =
generate_throw_exception("WrongMethodTypeException throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
false, rax, rcx);
} }
void generate_all() { void generate_all() {

View file

@ -34,11 +34,6 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small) code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
}; };
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 30000 DEBUG_ONLY(+ 10000)
};
class x86 { class x86 {
friend class StubGenerator; friend class StubGenerator;
friend class VMStructs; friend class VMStructs;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,6 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small) code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
}; };
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 80000 DEBUG_ONLY(+ 120000)
};
class x86 { class x86 {
friend class StubGenerator; friend class StubGenerator;

View file

@ -112,32 +112,6 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
return entry; return entry;
} }
// Arguments are: required type at TOS+4, failing object (or NULL) at TOS.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
address entry = __ pc();
__ pop(rbx); // actual failing object is at TOS
__ pop(rax); // required type is at TOS+4
__ verify_oop(rbx);
__ verify_oop(rax);
// Various method handle types use interpreter registers as temps.
__ restore_bcp();
__ restore_locals();
// Expression stack must be empty before entering the VM for an exception.
__ empty_expression_stack();
__ empty_FPU_stack();
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_WrongMethodTypeException),
// pass required type, failing object (or NULL)
rax, rbx);
return entry;
}
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both"); assert(!pass_oop || message == NULL, "either oop or message but not both");
address entry = __ pc(); address entry = __ pc();

View file

@ -120,31 +120,6 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
return entry; return entry;
} }
// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
address entry = __ pc();
__ pop(c_rarg2); // failing object is at TOS
__ pop(c_rarg1); // required type is at TOS+8
__ verify_oop(c_rarg1);
__ verify_oop(c_rarg2);
// Various method handle types use interpreter registers as temps.
__ restore_bcp();
__ restore_locals();
// Expression stack must be empty before entering the VM for an exception.
__ empty_expression_stack();
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_WrongMethodTypeException),
// pass required type, failing object (or NULL)
c_rarg1, c_rarg2);
return entry;
}
address TemplateInterpreterGenerator::generate_exception_handler_common( address TemplateInterpreterGenerator::generate_exception_handler_common(
const char* name, const char* message, bool pass_oop) { const char* name, const char* message, bool pass_oop) {
assert(!pass_oop || message == NULL, "either oop or message but not both"); assert(!pass_oop || message == NULL, "either oop or message but not both");

View file

@ -373,15 +373,17 @@ void TemplateTable::ldc(bool wide) {
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpl(rdx, JVM_CONSTANT_String); __ cmpl(rdx, JVM_CONSTANT_String);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpl(rdx, JVM_CONSTANT_Object);
__ jcc(Assembler::equal, L);
__ stop("unexpected tag type in ldc"); __ stop("unexpected tag type in ldc");
__ bind(L); __ bind(L);
} }
#endif #endif
Label isOop; Label isOop;
// atos and itos // atos and itos
// String is only oop type we will see here // Integer is only non-oop type we will see here
__ cmpl(rdx, JVM_CONSTANT_String); __ cmpl(rdx, JVM_CONSTANT_Integer);
__ jccb(Assembler::equal, isOop); __ jccb(Assembler::notEqual, isOop);
__ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset)); __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(itos); __ push(itos);
__ jmp(Done); __ jmp(Done);

View file

@ -385,6 +385,8 @@ void TemplateTable::ldc(bool wide) {
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpl(rdx, JVM_CONSTANT_String); __ cmpl(rdx, JVM_CONSTANT_String);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpl(rdx, JVM_CONSTANT_Object);
__ jcc(Assembler::equal, L);
__ stop("unexpected tag type in ldc"); __ stop("unexpected tag type in ldc");
__ bind(L); __ bind(L);
} }

View file

@ -321,6 +321,20 @@ void VM_Version::get_processor_features() {
if (UseSSE < 2) UseSSE = 2; if (UseSSE < 2) UseSSE = 2;
#endif #endif
#ifdef AMD64
// flush_icache_stub have to be generated first.
// That is why Icache line size is hard coded in ICache class,
// see icache_x86.hpp. It is also the reason why we can't use
// clflush instruction in 32-bit VM since it could be running
// on CPU which does not support it.
//
// The only thing we can do is to verify that flushed
// ICache::line_size has correct value.
guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
// clflush_size is size in quadwords (8 bytes).
guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
#endif
// If the OS doesn't support SSE, we can't use this feature even if the HW does // If the OS doesn't support SSE, we can't use this feature even if the HW does
if (!os::supports_sse()) if (!os::supports_sse())
_cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2); _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

View file

@ -91,7 +91,9 @@ public:
cmpxchg8 : 1, cmpxchg8 : 1,
: 6, : 6,
cmov : 1, cmov : 1,
: 7, : 3,
clflush : 1,
: 3,
mmx : 1, mmx : 1,
fxsr : 1, fxsr : 1,
sse : 1, sse : 1,

View file

@ -830,6 +830,17 @@ void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
} }
} }
// This could be in MacroAssembler but it's fairly C2 specific
void emit_cmpfp_fixup(MacroAssembler& _masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
__ andq(Address(rsp, 0), 0xffffff2b);
__ popf();
__ bind(exit);
__ nop(); // (target for branch to avoid branch to branch)
}
//============================================================================= //=============================================================================
const bool Matcher::constant_table_absolute_addressing = true; const bool Matcher::constant_table_absolute_addressing = true;
@ -2173,27 +2184,9 @@ encode %{
emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
%} %}
enc_class cmpfp_fixup() enc_class cmpfp_fixup() %{
%{ MacroAssembler _masm(&cbuf);
// jnp,s exit emit_cmpfp_fixup(_masm);
emit_opcode(cbuf, 0x7B);
emit_d8(cbuf, 0x0A);
// pushfq
emit_opcode(cbuf, 0x9C);
// andq $0xffffff2b, (%rsp)
emit_opcode(cbuf, Assembler::REX_W);
emit_opcode(cbuf, 0x81);
emit_opcode(cbuf, 0x24);
emit_opcode(cbuf, 0x24);
emit_d32(cbuf, 0xffffff2b);
// popfq
emit_opcode(cbuf, 0x9D);
// nop (target for branch to avoid branch to branch)
emit_opcode(cbuf, 0x90);
%} %}
enc_class cmpfp3(rRegI dst) enc_class cmpfp3(rRegI dst)
@ -3179,50 +3172,6 @@ encode %{
emit_rm(cbuf, 0x3, 0x0, dstenc); emit_rm(cbuf, 0x3, 0x0, dstenc);
%} %}
enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
rcx_RegI tmp)
%{
// cadd_cmpLT
int tmpReg = $tmp$$reg;
int penc = $p$$reg;
int qenc = $q$$reg;
int yenc = $y$$reg;
// subl $p,$q
if (penc < 8) {
if (qenc >= 8) {
emit_opcode(cbuf, Assembler::REX_B);
}
} else {
if (qenc < 8) {
emit_opcode(cbuf, Assembler::REX_R);
} else {
emit_opcode(cbuf, Assembler::REX_RB);
}
}
emit_opcode(cbuf, 0x2B);
emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
// sbbl $tmp, $tmp
emit_opcode(cbuf, 0x1B);
emit_rm(cbuf, 0x3, tmpReg, tmpReg);
// andl $tmp, $y
if (yenc >= 8) {
emit_opcode(cbuf, Assembler::REX_B);
}
emit_opcode(cbuf, 0x23);
emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
// addl $p,$tmp
if (penc >= 8) {
emit_opcode(cbuf, Assembler::REX_R);
}
emit_opcode(cbuf, 0x03);
emit_rm(cbuf, 0x3, penc & 7, tmpReg);
%}
// Compare the lonogs and set -1, 0, or 1 into dst // Compare the lonogs and set -1, 0, or 1 into dst
enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst) enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
@ -10206,9 +10155,7 @@ instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
%} %}
instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr)
rRegI tmp,
rFlagsReg cr)
%{ %{
match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
effect(TEMP tmp, KILL cr); effect(TEMP tmp, KILL cr);
@ -10218,25 +10165,19 @@ instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y,
"sbbl $tmp, $tmp\n\t" "sbbl $tmp, $tmp\n\t"
"andl $tmp, $y\n\t" "andl $tmp, $y\n\t"
"addl $p, $tmp" %} "addl $p, $tmp" %}
ins_encode(enc_cmpLTP(p, q, y, tmp)); ins_encode %{
Register Rp = $p$$Register;
Register Rq = $q$$Register;
Register Ry = $y$$Register;
Register Rt = $tmp$$Register;
__ subl(Rp, Rq);
__ sbbl(Rt, Rt);
__ andl(Rt, Ry);
__ addl(Rp, Rt);
%}
ins_pipe(pipe_cmplt); ins_pipe(pipe_cmplt);
%} %}
/* If I enable this, I encourage spilling in the inner loop of compress.
instruct cadd_cmpLTMask_mem( rRegI p, rRegI q, memory y, rRegI tmp, rFlagsReg cr )
%{
match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
effect( TEMP tmp, KILL cr );
ins_cost(400);
format %{ "SUB $p,$q\n\t"
"SBB RCX,RCX\n\t"
"AND RCX,$y\n\t"
"ADD $p,RCX" %}
ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
%}
*/
//---------- FP Instructions------------------------------------------------ //---------- FP Instructions------------------------------------------------
instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
@ -10305,14 +10246,8 @@ instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{
"popfq\n" "popfq\n"
"exit: nop\t# avoid branch to branch" %} "exit: nop\t# avoid branch to branch" %}
ins_encode %{ ins_encode %{
Label L_exit;
__ ucomiss($src$$XMMRegister, $constantaddress($con)); __ ucomiss($src$$XMMRegister, $constantaddress($con));
__ jcc(Assembler::noParity, L_exit); emit_cmpfp_fixup(_masm);
__ pushf();
__ andq(rsp, 0xffffff2b);
__ popf();
__ bind(L_exit);
__ nop();
%} %}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -10393,14 +10328,8 @@ instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{
"popfq\n" "popfq\n"
"exit: nop\t# avoid branch to branch" %} "exit: nop\t# avoid branch to branch" %}
ins_encode %{ ins_encode %{
Label L_exit;
__ ucomisd($src$$XMMRegister, $constantaddress($con)); __ ucomisd($src$$XMMRegister, $constantaddress($con));
__ jcc(Assembler::noParity, L_exit); emit_cmpfp_fixup(_masm);
__ pushf();
__ andq(rsp, 0xffffff2b);
__ popf();
__ bind(L_exit);
__ nop();
%} %}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}

View file

@ -657,7 +657,7 @@ int CppInterpreter::method_handle_entry(methodOop method,
if (!is_exact) { if (!is_exact) {
if (method->intrinsic_id() == vmIntrinsics::_invokeExact) { if (method->intrinsic_id() == vmIntrinsics::_invokeExact) {
CALL_VM_NOCHECK_NOFIX( CALL_VM_NOCHECK_NOFIX(
InterpreterRuntime::throw_WrongMethodTypeException( SharedRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype)); thread, method_type, mhtype));
// NB all oops trashed! // NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do"); assert(HAS_PENDING_EXCEPTION, "should do");
@ -673,7 +673,7 @@ int CppInterpreter::method_handle_entry(methodOop method,
oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form); oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form);
if (adapter == NULL) { if (adapter == NULL) {
CALL_VM_NOCHECK_NOFIX( CALL_VM_NOCHECK_NOFIX(
InterpreterRuntime::throw_WrongMethodTypeException( SharedRuntime::throw_WrongMethodTypeException(
thread, method_type, mhtype)); thread, method_type, mhtype));
// NB all oops trashed! // NB all oops trashed!
assert(HAS_PENDING_EXCEPTION, "should do"); assert(HAS_PENDING_EXCEPTION, "should do");

View file

@ -169,7 +169,35 @@ sigset_t SR_sigset;
/* Used to protect dlsym() calls */ /* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex; static pthread_mutex_t dl_mutex;
//////////////////////////////////////////////////////////////////////////////// #ifdef JAVASE_EMBEDDED
class MemNotifyThread: public Thread {
friend class VMStructs;
public:
virtual void run();
private:
static MemNotifyThread* _memnotify_thread;
int _fd;
public:
// Constructor
MemNotifyThread(int fd);
// Tester
bool is_memnotify_thread() const { return true; }
// Printing
char* name() const { return (char*)"Linux MemNotify Thread"; }
// Returns the single instance of the MemNotifyThread
static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
// Create and start the single instance of MemNotifyThread
static void start();
};
#endif // JAVASE_EMBEDDED
// utility functions // utility functions
static int SR_initialize(); static int SR_initialize();
@ -2085,6 +2113,14 @@ void os::print_os_info(outputStream* st) {
st->cr(); st->cr();
} }
void os::pd_print_cpu_info(outputStream* st) {
st->print("\n/proc/cpuinfo:\n");
if (!_print_ascii_file("/proc/cpuinfo", st)) {
st->print(" <Not Available>");
}
st->cr();
}
void os::print_memory_info(outputStream* st) { void os::print_memory_info(outputStream* st) {
st->print("Memory:"); st->print("Memory:");
@ -4237,7 +4273,16 @@ jint os::init_2(void)
} }
// this is called at the end of vm_initialization // this is called at the end of vm_initialization
void os::init_3(void) { } void os::init_3(void)
{
#ifdef JAVASE_EMBEDDED
// Start the MemNotifyThread
if (LowMemoryProtection) {
MemNotifyThread::start();
}
return;
#endif
}
// Mark the polling page as unreadable // Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) { void os::make_polling_page_unreadable(void) {
@ -5360,3 +5405,78 @@ bool os::is_headless_jre() {
return true; return true;
} }
#ifdef JAVASE_EMBEDDED
//
// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
//
MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
// ctor
//
MemNotifyThread::MemNotifyThread(int fd): Thread() {
assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
_fd = fd;
if (os::create_thread(this, os::os_thread)) {
_memnotify_thread = this;
os::set_priority(this, NearMaxPriority);
os::start_thread(this);
}
}
// Where all the work gets done
//
void MemNotifyThread::run() {
assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
// Set up the select arguments
fd_set rfds;
if (_fd != -1) {
FD_ZERO(&rfds);
FD_SET(_fd, &rfds);
}
// Now wait for the mem_notify device to wake up
while (1) {
// Wait for the mem_notify device to signal us..
int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
if (rc == -1) {
perror("select!\n");
break;
} else if (rc) {
//ssize_t free_before = os::available_memory();
//tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
// The kernel is telling us there is not much memory left...
// try to do something about that
// If we are not already in a GC, try one.
if (!Universe::heap()->is_gc_active()) {
Universe::heap()->collect(GCCause::_allocation_failure);
//ssize_t free_after = os::available_memory();
//tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
//tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
}
// We might want to do something like the following if we find the GC's are not helping...
// Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
}
}
}
//
// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
//
void MemNotifyThread::start() {
int fd;
fd = open ("/dev/mem_notify", O_RDONLY, 0);
if (fd < 0) {
return;
}
if (memnotify_thread() == NULL) {
new MemNotifyThread(fd);
}
}
#endif // JAVASE_EMBEDDED

View file

@ -2317,6 +2317,10 @@ static bool check_addr0(outputStream* st) {
return status; return status;
} }
void os::pd_print_cpu_info(outputStream* st) {
// Nothing to do for now.
}
void os::print_memory_info(outputStream* st) { void os::print_memory_info(outputStream* st) {
st->print("Memory:"); st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10); st->print(" %dk page", os::vm_page_size()>>10);

View file

@ -1720,6 +1720,10 @@ void os::print_os_info(outputStream* st) {
st->cr(); st->cr();
} }
void os::pd_print_cpu_info(outputStream* st) {
// Nothing to do for now.
}
void os::print_memory_info(outputStream* st) { void os::print_memory_info(outputStream* st) {
st->print("Memory:"); st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10); st->print(" %dk page", os::vm_page_size()>>10);

View file

@ -33,6 +33,28 @@ void MacroAssembler::int3() {
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
} }
#ifdef MINIMIZE_RAM_USAGE
void MacroAssembler::get_thread(Register thread) {
// call pthread_getspecific
// void * pthread_getspecific(pthread_key_t key);
if (thread != rax) push(rax);
push(rcx);
push(rdx);
push(ThreadLocalStorage::thread_index());
call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
increment(rsp, wordSize);
pop(rdx);
pop(rcx);
if (thread != rax) {
mov(thread, rax);
pop(rax);
}
}
#else
void MacroAssembler::get_thread(Register thread) { void MacroAssembler::get_thread(Register thread) {
movl(thread, rsp); movl(thread, rsp);
shrl(thread, PAGE_SHIFT); shrl(thread, PAGE_SHIFT);
@ -43,6 +65,7 @@ void MacroAssembler::get_thread(Register thread) {
movptr(thread, tls); movptr(thread, tls);
} }
#endif // MINIMIZE_RAM_USAGE
#else #else
void MacroAssembler::int3() { void MacroAssembler::int3() {
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));

View file

@ -52,25 +52,20 @@
// MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the // MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the
// physical memory page (i.e. similar to MADV_FREE on Solaris). // physical memory page (i.e. similar to MADV_FREE on Solaris).
#ifndef AMD64 #if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)]; Thread* ThreadLocalStorage::_sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
#endif // !AMD64
void ThreadLocalStorage::generate_code_for_get_thread() { void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing we can do here for user-level thread // nothing we can do here for user-level thread
} }
void ThreadLocalStorage::pd_init() { void ThreadLocalStorage::pd_init() {
#ifndef AMD64
assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(), assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(),
"page size must be multiple of PAGE_SIZE"); "page size must be multiple of PAGE_SIZE");
#endif // !AMD64
} }
void ThreadLocalStorage::pd_set_thread(Thread* thread) { void ThreadLocalStorage::pd_set_thread(Thread* thread) {
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
#ifndef AMD64
address stack_top = os::current_stack_base(); address stack_top = os::current_stack_base();
size_t stack_size = os::current_stack_size(); size_t stack_size = os::current_stack_size();
@ -88,5 +83,17 @@ void ThreadLocalStorage::pd_set_thread(Thread* thread) {
"thread exited without detaching from VM??"); "thread exited without detaching from VM??");
_sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread; _sp_map[(uintptr_t)p >> PAGE_SHIFT] = thread;
} }
#endif // !AMD64
} }
#else
void ThreadLocalStorage::generate_code_for_get_thread() {
// nothing we can do here for user-level thread
}
void ThreadLocalStorage::pd_init() {
}
void ThreadLocalStorage::pd_set_thread(Thread* thread) {
os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
}
#endif // !AMD64 && !MINIMIZE_RAM_USAGE

View file

@ -27,28 +27,32 @@
// Processor dependent parts of ThreadLocalStorage // Processor dependent parts of ThreadLocalStorage
#ifndef AMD64 #if !defined(AMD64) && !defined(MINIMIZE_RAM_USAGE)
// map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp // map stack pointer to thread pointer - see notes in threadLS_linux_x86.cpp
#define SP_BITLENGTH 32 #define SP_BITLENGTH 32
#define PAGE_SHIFT 12 #define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_SIZE (1UL << PAGE_SHIFT)
static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)]; static Thread* _sp_map[1UL << (SP_BITLENGTH - PAGE_SHIFT)];
#endif // !AMD64
public: public:
#ifndef AMD64
static Thread** sp_map_addr() { return _sp_map; } static Thread** sp_map_addr() { return _sp_map; }
#endif // !AMD64
static Thread* thread() { static Thread* thread() {
#ifdef AMD64
return (Thread*) os::thread_local_storage_at(thread_index());
#else
uintptr_t sp; uintptr_t sp;
__asm__ volatile ("movl %%esp, %0" : "=r" (sp)); __asm__ volatile ("movl %%esp, %0" : "=r" (sp));
return _sp_map[sp >> PAGE_SHIFT]; return _sp_map[sp >> PAGE_SHIFT];
#endif // AMD64
} }
#else
public:
static Thread* thread() {
return (Thread*) os::thread_local_storage_at(thread_index());
}
#endif // AMD64 || MINIMIZE_RAM_USAGE
#endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP #endif // OS_CPU_LINUX_X86_VM_THREADLS_LINUX_X86_HPP

View file

@ -75,8 +75,16 @@ will build the Win32 cross compiled version of hsdis based on 2.19.1.
* Installing * Installing
Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so. You can Products are named like build/$OS-$LIBARCH/hsdis-$LIBARCH.so. You can
install them on your LD_LIBRARY_PATH, or inside of your JRE next to install them on your LD_LIBRARY_PATH, or inside of your JRE/JDK. The
$LIBARCH/libjvm.so. search path in the JVM is:
1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
3. <home>/jre/lib/<arch>/hsdis-<arch>.so
4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
Note that there's a bug in hotspot versions prior to hs22 that causes
steps 2 and 3 to fail when used with JDK7.
Now test: Now test:

View file

@ -2812,6 +2812,13 @@ void ADLParser::ins_encode_parse_block(InstructForm& inst) {
params->add_entry(param); params->add_entry(param);
} }
// Check for duplicate ins_encode sections after parsing the block
// so that parsing can continue and find any other errors.
if (inst._insencode != NULL) {
parse_err(SYNERR, "Multiple ins_encode sections defined\n");
return;
}
// Set encode class of this instruction. // Set encode class of this instruction.
inst._insencode = encrule; inst._insencode = encrule;
} }
@ -3044,6 +3051,13 @@ void ADLParser::ins_encode_parse(InstructForm& inst) {
next_char(); // move past ';' next_char(); // move past ';'
skipws(); // be friendly to oper_parse() skipws(); // be friendly to oper_parse()
// Check for duplicate ins_encode sections after parsing the block
// so that parsing can continue and find any other errors.
if (inst._insencode != NULL) {
parse_err(SYNERR, "Multiple ins_encode sections defined\n");
return;
}
// Debug Stuff // Debug Stuff
if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name); if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name);

View file

@ -33,6 +33,7 @@
#include "compiler/compileBroker.hpp" #include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp" #include "interpreter/bytecode.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
#include "runtime/compilationPolicy.hpp"
#include "utilities/bitMap.inline.hpp" #include "utilities/bitMap.inline.hpp"
class BlockListBuilder VALUE_OBJ_CLASS_SPEC { class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
@ -3395,8 +3396,8 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) { bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
assert(!callee->is_native(), "callee must not be native"); assert(!callee->is_native(), "callee must not be native");
if (count_backedges() && callee->has_loops()) { if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("too complex for tiered"); INLINE_BAILOUT("inlining prohibited by policy");
} }
// first perform tests of things it's not possible to inline // first perform tests of things it's not possible to inline
if (callee->has_exception_handlers() && if (callee->has_exception_handlers() &&

View file

@ -2799,7 +2799,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
// Load CallSite object from constant pool cache. // Load CallSite object from constant pool cache.
__ oop2reg(cpcache->constant_encoding(), tmp); __ oop2reg(cpcache->constant_encoding(), tmp);
__ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp); __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
// Load target MethodHandle from CallSite object. // Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver); __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

View file

@ -642,7 +642,7 @@ void NullCheckVisitor::do_NewInstance (NewInstance* x) { nce()->handle_Ne
void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewTypeArray (NewTypeArray* x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_NewObjectArray (NewObjectArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewObjectArray (NewObjectArray* x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_NewMultiArray (NewMultiArray* x) { nce()->handle_NewArray(x); } void NullCheckVisitor::do_NewMultiArray (NewMultiArray* x) { nce()->handle_NewArray(x); }
void NullCheckVisitor::do_CheckCast (CheckCast* x) {} void NullCheckVisitor::do_CheckCast (CheckCast* x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {} void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {}
void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); } void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); }
void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); } void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); }

View file

@ -383,8 +383,10 @@ JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
} }
JRT_END JRT_END
// This is a helper to allow us to safepoint but allow the outer entry // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// to be safepoint free if we need to do an osr // associated with the top activation record. The inlinee (that is possibly included in the enclosing
// method) method oop is passed as an argument. In order to do that it is embedded in the code as
// a constant.
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) { static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
nmethod* osr_nm = NULL; nmethod* osr_nm = NULL;
methodHandle method(THREAD, m); methodHandle method(THREAD, m);
@ -420,7 +422,7 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
bci = branch_bci + offset; bci = branch_bci + offset;
} }
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD); osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
return osr_nm; return osr_nm;
} }

View file

@ -36,6 +36,7 @@ class ciCallProfile : StackObj {
private: private:
// Fields are initialized directly by ciMethod::call_profile_at_bci. // Fields are initialized directly by ciMethod::call_profile_at_bci.
friend class ciMethod; friend class ciMethod;
friend class ciMethodHandle;
enum { MorphismLimit = 2 }; // Max call site's morphism we care about enum { MorphismLimit = 2 }; // Max call site's morphism we care about
int _limit; // number of receivers have been determined int _limit; // number of receivers have been determined
@ -58,10 +59,10 @@ private:
public: public:
// Note: The following predicates return false for invalid profiles: // Note: The following predicates return false for invalid profiles:
bool has_receiver(int i) { return _limit > i; } bool has_receiver(int i) const { return _limit > i; }
int morphism() { return _morphism; } int morphism() const { return _morphism; }
int count() { return _count; } int count() const { return _count; }
int receiver_count(int i) { int receiver_count(int i) {
assert(i < _limit, "out of Call Profile MorphismLimit"); assert(i < _limit, "out of Call Profile MorphismLimit");
return _receiver_count[i]; return _receiver_count[i];

View file

@ -50,6 +50,7 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp" #include "oops/oop.inline2.hpp"
#include "prims/jvmtiExport.hpp" #include "prims/jvmtiExport.hpp"
#include "prims/methodHandleWalk.hpp"
#include "runtime/init.hpp" #include "runtime/init.hpp"
#include "runtime/reflection.hpp" #include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp" #include "runtime/sharedRuntime.hpp"
@ -371,6 +372,7 @@ bool ciEnv::check_klass_accessibility(ciKlass* accessing_klass,
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciEnv::get_klass_by_name_impl // ciEnv::get_klass_by_name_impl
ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass, ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
constantPoolHandle cpool,
ciSymbol* name, ciSymbol* name,
bool require_local) { bool require_local) {
ASSERT_IN_VM; ASSERT_IN_VM;
@ -386,7 +388,7 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
sym->utf8_length()-2, sym->utf8_length()-2,
KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass)); KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass));
ciSymbol* strippedname = get_symbol(strippedsym); ciSymbol* strippedname = get_symbol(strippedsym);
return get_klass_by_name_impl(accessing_klass, strippedname, require_local); return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
} }
// Check for prior unloaded klass. The SystemDictionary's answers // Check for prior unloaded klass. The SystemDictionary's answers
@ -443,6 +445,7 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
// Get element ciKlass recursively. // Get element ciKlass recursively.
ciKlass* elem_klass = ciKlass* elem_klass =
get_klass_by_name_impl(accessing_klass, get_klass_by_name_impl(accessing_klass,
cpool,
get_symbol(elem_sym), get_symbol(elem_sym),
require_local); require_local);
if (elem_klass != NULL && elem_klass->is_loaded()) { if (elem_klass != NULL && elem_klass->is_loaded()) {
@ -451,6 +454,19 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
} }
} }
if (found_klass() == NULL && !cpool.is_null() && cpool->has_preresolution()) {
// Look inside the constant pool for pre-resolved class entries.
for (int i = cpool->length() - 1; i >= 1; i--) {
if (cpool->tag_at(i).is_klass()) {
klassOop kls = cpool->resolved_klass_at(i);
if (Klass::cast(kls)->name() == sym) {
found_klass = KlassHandle(THREAD, kls);
break;
}
}
}
}
if (found_klass() != NULL) { if (found_klass() != NULL) {
// Found it. Build a CI handle. // Found it. Build a CI handle.
return get_object(found_klass())->as_klass(); return get_object(found_klass())->as_klass();
@ -468,6 +484,7 @@ ciKlass* ciEnv::get_klass_by_name(ciKlass* accessing_klass,
ciSymbol* klass_name, ciSymbol* klass_name,
bool require_local) { bool require_local) {
GUARDED_VM_ENTRY(return get_klass_by_name_impl(accessing_klass, GUARDED_VM_ENTRY(return get_klass_by_name_impl(accessing_klass,
constantPoolHandle(),
klass_name, klass_name,
require_local);) require_local);)
} }
@ -508,13 +525,14 @@ ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
if (klass.is_null()) { if (klass.is_null()) {
// Not found in constant pool. Use the name to do the lookup. // Not found in constant pool. Use the name to do the lookup.
ciKlass* k = get_klass_by_name_impl(accessor, ciKlass* k = get_klass_by_name_impl(accessor,
cpool,
get_symbol(klass_name), get_symbol(klass_name),
false); false);
// Calculate accessibility the hard way. // Calculate accessibility the hard way.
if (!k->is_loaded()) { if (!k->is_loaded()) {
is_accessible = false; is_accessible = false;
} else if (k->loader() != accessor->loader() && } else if (k->loader() != accessor->loader() &&
get_klass_by_name_impl(accessor, k->name(), true) == NULL) { get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
// Loaded only remotely. Not linked yet. // Loaded only remotely. Not linked yet.
is_accessible = false; is_accessible = false;
} else { } else {
@ -565,7 +583,7 @@ ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
index = cpc_entry->constant_pool_index(); index = cpc_entry->constant_pool_index();
oop obj = cpc_entry->f1(); oop obj = cpc_entry->f1();
if (obj != NULL) { if (obj != NULL) {
assert(obj->is_instance(), "must be an instance"); assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
ciObject* ciobj = get_object(obj); ciObject* ciobj = get_object(obj);
return ciConstant(T_OBJECT, ciobj); return ciConstant(T_OBJECT, ciobj);
} }
@ -607,7 +625,7 @@ ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
return ciConstant(T_OBJECT, klass->java_mirror()); return ciConstant(T_OBJECT, klass->java_mirror());
} else if (tag.is_object()) { } else if (tag.is_object()) {
oop obj = cpool->object_at(index); oop obj = cpool->object_at(index);
assert(obj->is_instance(), "must be an instance"); assert(obj->is_instance() || obj->is_array(), "must be a Java reference");
ciObject* ciobj = get_object(obj); ciObject* ciobj = get_object(obj);
return ciConstant(T_OBJECT, ciobj); return ciConstant(T_OBJECT, ciobj);
} else if (tag.is_method_type()) { } else if (tag.is_method_type()) {
@ -729,9 +747,35 @@ ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
Symbol* name_sym = cpool->name_ref_at(index); Symbol* name_sym = cpool->name_ref_at(index);
Symbol* sig_sym = cpool->signature_ref_at(index); Symbol* sig_sym = cpool->signature_ref_at(index);
if (cpool->has_preresolution()
|| (holder == ciEnv::MethodHandle_klass() &&
methodOopDesc::is_method_handle_invoke_name(name_sym))) {
// Short-circuit lookups for JSR 292-related call sites.
// That is, do not rely only on name-based lookups, because they may fail
// if the names are not resolvable in the boot class loader (7056328).
switch (bc) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
{
methodOop m = constantPoolOopDesc::method_at_if_loaded(cpool, index, bc);
if (m != NULL) {
return get_object(m)->as_method();
}
}
}
}
if (holder_is_accessible) { // Our declared holder is loaded. if (holder_is_accessible) { // Our declared holder is loaded.
instanceKlass* lookup = declared_holder->get_instanceKlass(); instanceKlass* lookup = declared_holder->get_instanceKlass();
methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc); methodOop m = lookup_method(accessor->get_instanceKlass(), lookup, name_sym, sig_sym, bc);
if (m != NULL &&
(bc == Bytecodes::_invokestatic
? instanceKlass::cast(m->method_holder())->is_not_initialized()
: !instanceKlass::cast(m->method_holder())->is_loaded())) {
m = NULL;
}
if (m != NULL) { if (m != NULL) {
// We found the method. // We found the method.
return get_object(m)->as_method(); return get_object(m)->as_method();
@ -1046,7 +1090,7 @@ void ciEnv::register_method(ciMethod* target,
// ciEnv::find_system_klass // ciEnv::find_system_klass
ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) { ciKlass* ciEnv::find_system_klass(ciSymbol* klass_name) {
VM_ENTRY_MARK; VM_ENTRY_MARK;
return get_klass_by_name_impl(NULL, klass_name, false); return get_klass_by_name_impl(NULL, constantPoolHandle(), klass_name, false);
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------

View file

@ -137,6 +137,7 @@ private:
// Implementation methods for loading and constant pool access. // Implementation methods for loading and constant pool access.
ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass, ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
constantPoolHandle cpool,
ciSymbol* klass_name, ciSymbol* klass_name,
bool require_local); bool require_local);
ciKlass* get_klass_by_index_impl(constantPoolHandle cpool, ciKlass* get_klass_by_index_impl(constantPoolHandle cpool,

View file

@ -287,7 +287,7 @@ ciType* ciField::compute_type() {
} }
ciType* ciField::compute_type_impl() { ciType* ciField::compute_type_impl() {
ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, _signature, false); ciKlass* type = CURRENT_ENV->get_klass_by_name_impl(_holder, constantPoolHandle(), _signature, false);
if (!type->is_primitive_type() && is_shared()) { if (!type->is_primitive_type() && is_shared()) {
// We must not cache a pointer to an unshared type, in a shared field. // We must not cache a pointer to an unshared type, in a shared field.
bool type_is_also_shared = false; bool type_is_also_shared = false;

View file

@ -125,7 +125,8 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_name = env->get_symbol(h_m()->name()); _name = env->get_symbol(h_m()->name());
_holder = env->get_object(h_m()->method_holder())->as_instance_klass(); _holder = env->get_object(h_m()->method_holder())->as_instance_klass();
ciSymbol* sig_symbol = env->get_symbol(h_m()->signature()); ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
_signature = new (env->arena()) ciSignature(_holder, sig_symbol); constantPoolHandle cpool = h_m()->constants();
_signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
_method_data = NULL; _method_data = NULL;
// Take a snapshot of these values, so they will be commensurate with the MDO. // Take a snapshot of these values, so they will be commensurate with the MDO.
if (ProfileInterpreter || TieredCompilation) { if (ProfileInterpreter || TieredCompilation) {
@ -152,7 +153,7 @@ ciMethod::ciMethod(ciInstanceKlass* holder,
// These fields are always filled in. // These fields are always filled in.
_name = name; _name = name;
_holder = holder; _holder = holder;
_signature = new (CURRENT_ENV->arena()) ciSignature(_holder, signature); _signature = new (CURRENT_ENV->arena()) ciSignature(_holder, constantPoolHandle(), signature);
_intrinsic_id = vmIntrinsics::_none; _intrinsic_id = vmIntrinsics::_none;
_liveness = NULL; _liveness = NULL;
_can_be_statically_bound = false; _can_be_statically_bound = false;
@ -1009,6 +1010,12 @@ int ciMethod::comp_level() {
return 0; return 0;
} }
int ciMethod::highest_osr_comp_level() {
check_is_loaded();
VM_ENTRY_MARK;
return get_methodOop()->highest_osr_comp_level();
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciMethod::instructions_size // ciMethod::instructions_size
// //

View file

@ -158,6 +158,7 @@ class ciMethod : public ciObject {
int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; }
int comp_level(); int comp_level();
int highest_osr_comp_level();
Bytecodes::Code java_code_at_bci(int bci) { Bytecodes::Code java_code_at_bci(int bci) {
address bcp = code() + bci; address bcp = code() + bci;

View file

@ -41,9 +41,19 @@ ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
VM_ENTRY_MARK; VM_ENTRY_MARK;
Handle h(get_oop()); Handle h(get_oop());
methodHandle callee(_callee->get_methodOop()); methodHandle callee(_callee->get_methodOop());
assert(callee->is_method_handle_invoke(), "");
oop mt1 = callee->method_handle_type();
oop mt2 = java_lang_invoke_MethodHandle::type(h());
if (!java_lang_invoke_MethodType::equals(mt1, mt2)) {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("ciMethodHandle::get_adapter: types not equal");
mt1->print(); mt2->print();
}
return NULL;
}
// We catch all exceptions here that could happen in the method // We catch all exceptions here that could happen in the method
// handle compiler and stop the VM. // handle compiler and stop the VM.
MethodHandleCompiler mhc(h, callee, _profile->count(), is_invokedynamic, THREAD); MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile.count(), is_invokedynamic, THREAD);
if (!HAS_PENDING_EXCEPTION) { if (!HAS_PENDING_EXCEPTION) {
methodHandle m = mhc.compile(THREAD); methodHandle m = mhc.compile(THREAD);
if (!HAS_PENDING_EXCEPTION) { if (!HAS_PENDING_EXCEPTION) {
@ -53,7 +63,7 @@ ciMethod* ciMethodHandle::get_adapter_impl(bool is_invokedynamic) const {
if (PrintMiscellaneous && (Verbose || WizardMode)) { if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print("*** ciMethodHandle::get_adapter => "); tty->print("*** ciMethodHandle::get_adapter => ");
PENDING_EXCEPTION->print(); PENDING_EXCEPTION->print();
tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@ tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print();
} }
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
return NULL; return NULL;

View file

@ -36,7 +36,7 @@ class ciMethodHandle : public ciInstance {
private: private:
ciMethod* _callee; ciMethod* _callee;
ciMethod* _caller; ciMethod* _caller;
ciCallProfile* _profile; ciCallProfile _profile;
// Return an adapter for this MethodHandle. // Return an adapter for this MethodHandle.
ciMethod* get_adapter_impl(bool is_invokedynamic) const; ciMethod* get_adapter_impl(bool is_invokedynamic) const;
@ -49,8 +49,7 @@ public:
ciMethodHandle(instanceHandle h_i) : ciMethodHandle(instanceHandle h_i) :
ciInstance(h_i), ciInstance(h_i),
_callee(NULL), _callee(NULL),
_caller(NULL), _caller(NULL)
_profile(NULL)
{} {}
// What kind of ciObject is this? // What kind of ciObject is this?
@ -58,7 +57,7 @@ public:
void set_callee(ciMethod* m) { _callee = m; } void set_callee(ciMethod* m) { _callee = m; }
void set_caller(ciMethod* m) { _caller = m; } void set_caller(ciMethod* m) { _caller = m; }
void set_call_profile(ciCallProfile* profile) { _profile = profile; } void set_call_profile(ciCallProfile profile) { _profile = profile; }
// Return an adapter for a MethodHandle call. // Return an adapter for a MethodHandle call.
ciMethod* get_method_handle_adapter() const { return get_adapter(false); } ciMethod* get_method_handle_adapter() const { return get_adapter(false); }

View file

@ -93,6 +93,7 @@ ciKlass* ciObjArrayKlass::element_klass() {
// element klass by name. // element klass by name.
_element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl( _element_klass = CURRENT_THREAD_ENV->get_klass_by_name_impl(
this, this,
constantPoolHandle(),
construct_array_name(base_element_klass()->name(), construct_array_name(base_element_klass()->name(),
dimension() - 1), dimension() - 1),
false); false);

View file

@ -187,7 +187,7 @@ jobject ciObject::constant_encoding() {
// ciObject::can_be_constant // ciObject::can_be_constant
bool ciObject::can_be_constant() { bool ciObject::can_be_constant() {
if (ScavengeRootsInCode >= 1) return true; // now everybody can encode as a constant if (ScavengeRootsInCode >= 1) return true; // now everybody can encode as a constant
return handle() == NULL || !is_scavengable(); return handle() == NULL || is_perm();
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -204,7 +204,7 @@ bool ciObject::should_be_constant() {
return true; return true;
} }
} }
return handle() == NULL || !is_scavengable(); return handle() == NULL || is_perm();
} }

View file

@ -108,7 +108,7 @@ public:
int hash(); int hash();
// Tells if this oop has an encoding as a constant. // Tells if this oop has an encoding as a constant.
// True if is_scavengable is false. // True if is_perm is true.
// Also true if ScavengeRootsInCode is non-zero. // Also true if ScavengeRootsInCode is non-zero.
// If it does not have an encoding, the compiler is responsible for // If it does not have an encoding, the compiler is responsible for
// making other arrangements for dealing with the oop. // making other arrangements for dealing with the oop.
@ -116,7 +116,7 @@ public:
bool can_be_constant(); bool can_be_constant();
// Tells if this oop should be made a constant. // Tells if this oop should be made a constant.
// True if is_scavengable is false or ScavengeRootsInCode > 1. // True if is_perm is true or ScavengeRootsInCode > 1.
bool should_be_constant(); bool should_be_constant();
// Is this object guaranteed to be in the permanent part of the heap? // Is this object guaranteed to be in the permanent part of the heap?

View file

@ -35,7 +35,7 @@
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciSignature::ciSignature // ciSignature::ciSignature
ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol) { ciSignature::ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* symbol) {
ASSERT_IN_VM; ASSERT_IN_VM;
EXCEPTION_CONTEXT; EXCEPTION_CONTEXT;
_accessing_klass = accessing_klass; _accessing_klass = accessing_klass;
@ -64,7 +64,7 @@ ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
} else { } else {
ciSymbol* klass_name = env->get_symbol(name); ciSymbol* klass_name = env->get_symbol(name);
type = env->get_klass_by_name_impl(_accessing_klass, klass_name, false); type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
} }
} }
_types->append(type); _types->append(type);

View file

@ -44,7 +44,7 @@ private:
friend class ciMethod; friend class ciMethod;
ciSignature(ciKlass* accessing_klass, ciSymbol* signature); ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
void get_all_klasses(); void get_all_klasses();

View file

@ -3287,9 +3287,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
// Fields allocation: oops fields in super and sub classes are together. // Fields allocation: oops fields in super and sub classes are together.
if( nonstatic_field_size > 0 && super_klass() != NULL && if( nonstatic_field_size > 0 && super_klass() != NULL &&
super_klass->nonstatic_oop_map_size() > 0 ) { super_klass->nonstatic_oop_map_size() > 0 ) {
int map_size = super_klass->nonstatic_oop_map_size(); int map_count = super_klass->nonstatic_oop_map_count();
OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps(); OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
OopMapBlock* last_map = first_map + map_size - 1; OopMapBlock* last_map = first_map + map_count - 1;
int next_offset = last_map->offset() + (last_map->count() * heapOopSize); int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
if (next_offset == next_nonstatic_field_offset) { if (next_offset == next_nonstatic_field_offset) {
allocation_style = 0; // allocate oops first allocation_style = 0; // allocate oops first

View file

@ -1258,7 +1258,6 @@ class BacktraceBuilder: public StackObj {
objArrayOop _methods; objArrayOop _methods;
typeArrayOop _bcis; typeArrayOop _bcis;
int _index; int _index;
bool _dirty;
No_Safepoint_Verifier _nsv; No_Safepoint_Verifier _nsv;
public: public:
@ -1272,37 +1271,13 @@ class BacktraceBuilder: public StackObj {
}; };
// constructor for new backtrace // constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) { BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
expand(CHECK); expand(CHECK);
_backtrace = _head; _backtrace = _head;
_index = 0; _index = 0;
} }
void flush() {
// The following appears to have been an optimization to save from
// doing a barrier for each individual store into the _methods array,
// but rather to do it for the entire array after the series of writes.
// That optimization seems to have been lost when compressed oops was
// implemented. However, the extra card-marks below was left in place,
// but is now redundant because the individual stores into the
// _methods array already execute the barrier code. CR 6918185 has
// been filed so the original code may be restored by deferring the
// barriers until after the entire sequence of stores, thus re-enabling
// the intent of the original optimization. In the meantime the redundant
// card mark below is now disabled.
if (_dirty && _methods != NULL) {
#if 0
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
#endif
_dirty = false;
}
}
void expand(TRAPS) { void expand(TRAPS) {
flush();
objArrayHandle old_head(THREAD, _head); objArrayHandle old_head(THREAD, _head);
Pause_No_Safepoint_Verifier pnsv(&_nsv); Pause_No_Safepoint_Verifier pnsv(&_nsv);
@ -1328,7 +1303,6 @@ class BacktraceBuilder: public StackObj {
} }
oop backtrace() { oop backtrace() {
flush();
return _backtrace(); return _backtrace();
} }
@ -1342,7 +1316,6 @@ class BacktraceBuilder: public StackObj {
_methods->obj_at_put(_index, method); _methods->obj_at_put(_index, method);
_bcis->ushort_at_put(_index, bci); _bcis->ushort_at_put(_index, bci);
_index++; _index++;
_dirty = true;
} }
methodOop current_method() { methodOop current_method() {
@ -2574,6 +2547,18 @@ Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool intern_if_not_fou
return name; return name;
} }
bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
if (rtype(mt1) != rtype(mt2))
return false;
if (ptype_count(mt1) != ptype_count(mt2))
return false;
for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
if (ptype(mt1, i) != ptype(mt2, i))
return false;
}
return true;
}
oop java_lang_invoke_MethodType::rtype(oop mt) { oop java_lang_invoke_MethodType::rtype(oop mt) {
assert(is_instance(mt), "must be a MethodType"); assert(is_instance(mt), "must be a MethodType");
return mt->obj_field(_rtype_offset); return mt->obj_field(_rtype_offset);

View file

@ -1079,6 +1079,8 @@ class java_lang_invoke_MethodType: AllStatic {
return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass(); return obj != NULL && obj->klass() == SystemDictionary::MethodType_klass();
} }
static bool equals(oop mt1, oop mt2);
// Accessors for code generation: // Accessors for code generation:
static int rtype_offset_in_bytes() { return _rtype_offset; } static int rtype_offset_in_bytes() { return _rtype_offset; }
static int ptypes_offset_in_bytes() { return _ptypes_offset; } static int ptypes_offset_in_bytes() { return _ptypes_offset; }

View file

@ -2367,6 +2367,8 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
// Link m to his method type, if it is suitably generic. // Link m to his method type, if it is suitably generic.
oop mtform = java_lang_invoke_MethodType::form(mt()); oop mtform = java_lang_invoke_MethodType::form(mt());
if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform) if (mtform != NULL && mt() == java_lang_invoke_MethodTypeForm::erasedType(mtform)
// vmlayout must be an invokeExact:
&& name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)
&& java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) { && java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() > 0) {
java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m()); java_lang_invoke_MethodTypeForm::init_vmlayout(mtform, m());
} }

View file

@ -152,6 +152,7 @@ class SymbolPropertyTable;
template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \ template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \
template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \ template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \
template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \ template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \
template(BootstrapMethodError_klass, java_lang_BootstrapMethodError, Pre_JSR292) \
template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \ template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \ template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
/* Note: MethodHandle must be first, and CallSite last in group */ \ /* Note: MethodHandle must be first, and CallSite last in group */ \

View file

@ -148,6 +148,7 @@
template(java_lang_InstantiationException, "java/lang/InstantiationException") \ template(java_lang_InstantiationException, "java/lang/InstantiationException") \
template(java_lang_InstantiationError, "java/lang/InstantiationError") \ template(java_lang_InstantiationError, "java/lang/InstantiationError") \
template(java_lang_InterruptedException, "java/lang/InterruptedException") \ template(java_lang_InterruptedException, "java/lang/InterruptedException") \
template(java_lang_BootstrapMethodError, "java/lang/BootstrapMethodError") \
template(java_lang_LinkageError, "java/lang/LinkageError") \ template(java_lang_LinkageError, "java/lang/LinkageError") \
template(java_lang_NegativeArraySizeException, "java/lang/NegativeArraySizeException") \ template(java_lang_NegativeArraySizeException, "java/lang/NegativeArraySizeException") \
template(java_lang_NoSuchFieldException, "java/lang/NoSuchFieldException") \ template(java_lang_NoSuchFieldException, "java/lang/NoSuchFieldException") \

View file

@ -1810,7 +1810,7 @@ public:
void maybe_print(oop* p) { void maybe_print(oop* p) {
if (_print_nm == NULL) return; if (_print_nm == NULL) return;
if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root"); if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")", tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
_print_nm, (int)((intptr_t)p - (intptr_t)_print_nm), _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
(intptr_t)(*p), (intptr_t)p); (intptr_t)(*p), (intptr_t)p);
(*p)->print(); (*p)->print();
@ -1832,7 +1832,9 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
if (!method()->is_native()) { if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc()); SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke call(ssd.method(), ssd.bci()); Bytecode_invoke call(ssd.method(), ssd.bci());
bool has_receiver = call.has_receiver(); // compiled invokedynamic call sites have an implicit receiver at
// resolution time, so make sure it gets GC'ed.
bool has_receiver = !call.is_invokestatic();
Symbol* signature = call.signature(); Symbol* signature = call.signature();
fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f); fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
} }
@ -2311,7 +2313,7 @@ public:
_nm->print_nmethod(true); _nm->print_nmethod(true);
_ok = false; _ok = false;
} }
tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)", tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
(intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm)); (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
(*p)->print(); (*p)->print();
} }
@ -2324,7 +2326,7 @@ void nmethod::verify_scavenge_root_oops() {
DebugScavengeRoot debug_scavenge_root(this); DebugScavengeRoot debug_scavenge_root(this);
oops_do(&debug_scavenge_root); oops_do(&debug_scavenge_root);
if (!debug_scavenge_root.ok()) if (!debug_scavenge_root.ok())
fatal("found an unadvertised bad non-perm oop in the code cache"); fatal("found an unadvertised bad scavengable oop in the code cache");
} }
assert(scavenge_root_not_marked(), ""); assert(scavenge_root_not_marked(), "");
} }

View file

@ -109,7 +109,7 @@ class xmlStream;
class nmethod : public CodeBlob { class nmethod : public CodeBlob {
friend class VMStructs; friend class VMStructs;
friend class NMethodSweeper; friend class NMethodSweeper;
friend class CodeCache; // non-perm oops friend class CodeCache; // scavengable oops
private: private:
// Shared fields for all nmethod's // Shared fields for all nmethod's
methodOop _method; methodOop _method;
@ -466,17 +466,17 @@ public:
bool is_at_poll_return(address pc); bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc); bool is_at_poll_or_poll_return(address pc);
// Non-perm oop support // Scavengable oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; } bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
protected: protected:
enum { npl_on_list = 0x01, npl_marked = 0x10 }; enum { sl_on_list = 0x01, sl_marked = 0x10 };
void set_on_scavenge_root_list() { _scavenge_root_state = npl_on_list; } void set_on_scavenge_root_list() { _scavenge_root_state = sl_on_list; }
void clear_on_scavenge_root_list() { _scavenge_root_state = 0; } void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
// assertion-checking and pruning logic uses the bits of _scavenge_root_state // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT #ifndef PRODUCT
void set_scavenge_root_marked() { _scavenge_root_state |= npl_marked; } void set_scavenge_root_marked() { _scavenge_root_state |= sl_marked; }
void clear_scavenge_root_marked() { _scavenge_root_state &= ~npl_marked; } void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
bool scavenge_root_not_marked() { return (_scavenge_root_state &~ npl_on_list) == 0; } bool scavenge_root_not_marked() { return (_scavenge_root_state &~ sl_on_list) == 0; }
// N.B. there is no positive marked query, and we only use the not_marked query for asserts. // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT #endif //PRODUCT
nmethod* scavenge_root_link() const { return _scavenge_root_link; } nmethod* scavenge_root_link() const { return _scavenge_root_link; }

View file

@ -44,7 +44,7 @@ address PcDesc::real_pc(const nmethod* code) const {
void PcDesc::print(nmethod* code) { void PcDesc::print(nmethod* code) {
#ifndef PRODUCT #ifndef PRODUCT
ResourceMark rm; ResourceMark rm;
tty->print_cr("PcDesc(pc=0x%lx offset=%x):", real_pc(code), pc_offset()); tty->print_cr("PcDesc(pc=0x%lx offset=%x bits=%x):", real_pc(code), pc_offset(), _flags.bits);
if (scope_decode_offset() == DebugInformationRecorder::serialized_null) { if (scope_decode_offset() == DebugInformationRecorder::serialized_null) {
return; return;

View file

@ -300,12 +300,23 @@ void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
st->print("%4d ", compile_id); // print compilation number st->print("%4d ", compile_id); // print compilation number
// For unloaded methods the transition to zombie occurs after the
// method is cleared so it's impossible to report accurate
// information for that case.
bool is_synchronized = false;
bool has_exception_handler = false;
bool is_native = false;
if (method != NULL) {
is_synchronized = method->is_synchronized();
has_exception_handler = method->has_exception_handler();
is_native = method->is_native();
}
// method attributes // method attributes
const char compile_type = is_osr_method ? '%' : ' '; const char compile_type = is_osr_method ? '%' : ' ';
const char sync_char = method->is_synchronized() ? 's' : ' '; const char sync_char = is_synchronized ? 's' : ' ';
const char exception_char = method->has_exception_handler() ? '!' : ' '; const char exception_char = has_exception_handler ? '!' : ' ';
const char blocking_char = is_blocking ? 'b' : ' '; const char blocking_char = is_blocking ? 'b' : ' ';
const char native_char = method->is_native() ? 'n' : ' '; const char native_char = is_native ? 'n' : ' ';
// print method attributes // print method attributes
st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char); st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
@ -316,11 +327,15 @@ void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int
} }
st->print(" "); // more indent st->print(" "); // more indent
if (method == NULL) {
st->print("(method)");
} else {
method->print_short_name(st); method->print_short_name(st);
if (is_osr_method) { if (is_osr_method) {
st->print(" @ %d", osr_bci); st->print(" @ %d", osr_bci);
} }
st->print(" (%d bytes)", method->code_size()); st->print(" (%d bytes)", method->code_size());
}
if (msg != NULL) { if (msg != NULL) {
st->print(" %s", msg); st->print(" %s", msg);

View file

@ -78,21 +78,46 @@ bool Disassembler::load_library() {
char buf[JVM_MAXPATHLEN]; char buf[JVM_MAXPATHLEN];
os::jvm_path(buf, sizeof(buf)); os::jvm_path(buf, sizeof(buf));
int jvm_offset = -1; int jvm_offset = -1;
int lib_offset = -1;
{ {
// Match "jvm[^/]*" in jvm_path. // Match "jvm[^/]*" in jvm_path.
const char* base = buf; const char* base = buf;
const char* p = strrchr(buf, '/'); const char* p = strrchr(buf, '/');
if (p != NULL) lib_offset = p - base + 1;
p = strstr(p ? p : base, "jvm"); p = strstr(p ? p : base, "jvm");
if (p != NULL) jvm_offset = p - base; if (p != NULL) jvm_offset = p - base;
} }
// Find the disassembler shared library.
// Search for several paths derived from libjvm, in this order:
// 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so (for compatibility)
// 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
// 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
// 4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
if (jvm_offset >= 0) { if (jvm_offset >= 0) {
// Find the disassembler next to libjvm.so. // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
strcpy(&buf[jvm_offset], hsdis_library_name); strcpy(&buf[jvm_offset], hsdis_library_name);
strcat(&buf[jvm_offset], os::dll_file_extension()); strcat(&buf[jvm_offset], os::dll_file_extension());
_library = os::dll_load(buf, ebuf, sizeof ebuf); _library = os::dll_load(buf, ebuf, sizeof ebuf);
if (_library == NULL) {
// 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
strcpy(&buf[lib_offset], hsdis_library_name);
strcat(&buf[lib_offset], os::dll_file_extension());
_library = os::dll_load(buf, ebuf, sizeof ebuf);
} }
if (_library == NULL) { if (_library == NULL) {
// Try a free-floating lookup. // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
buf[lib_offset - 1] = '\0';
const char* p = strrchr(buf, '/');
if (p != NULL) {
lib_offset = p - buf + 1;
strcpy(&buf[lib_offset], hsdis_library_name);
strcat(&buf[lib_offset], os::dll_file_extension());
_library = os::dll_load(buf, ebuf, sizeof ebuf);
}
}
}
if (_library == NULL) {
// 4. hsdis-<arch>.so (using LD_LIBRARY_PATH)
strcpy(&buf[0], hsdis_library_name); strcpy(&buf[0], hsdis_library_name);
strcat(&buf[0], os::dll_file_extension()); strcat(&buf[0], os::dll_file_extension());
_library = os::dll_load(buf, ebuf, sizeof ebuf); _library = os::dll_load(buf, ebuf, sizeof ebuf);
@ -249,7 +274,13 @@ address decode_env::handle_event(const char* event, address arg) {
return arg; return arg;
} }
} else if (match(event, "mach")) { } else if (match(event, "mach")) {
static char buffer[32] = { 0, };
if (strcmp(buffer, (const char*)arg) != 0 ||
strlen((const char*)arg) > sizeof(buffer) - 1) {
// Only print this when the mach changes
strncpy(buffer, (const char*)arg, sizeof(buffer) - 1);
output()->print_cr("[Disassembling for mach='%s']", arg); output()->print_cr("[Disassembling for mach='%s']", arg);
}
} else if (match(event, "format bytes-per-line")) { } else if (match(event, "format bytes-per-line")) {
_bytes_per_line = (int) (intptr_t) arg; _bytes_per_line = (int) (intptr_t) arg;
} else { } else {

View file

@ -638,7 +638,9 @@ void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
assert(*derived_loc != (oop)base_loc, "location already added"); assert(*derived_loc != (oop)base_loc, "location already added");
assert(_list != NULL, "list must exist"); assert(_list != NULL, "list must exist");
intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc); intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
assert(offset >= -1000000, "wrong derived pointer info"); // This assert is invalid because derived pointers can be
// arbitrarily far away from their base.
// assert(offset >= -1000000, "wrong derived pointer info");
if (TraceDerivedPointers) { if (TraceDerivedPointers) {
tty->print_cr( tty->print_cr(

View file

@ -1833,8 +1833,6 @@ CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
} }
) )
_indexedFreeList[size].removeChunk(fc); _indexedFreeList[size].removeChunk(fc);
debug_only(fc->clearNext());
debug_only(fc->clearPrev());
NOT_PRODUCT( NOT_PRODUCT(
if (FLSVerifyIndexTable) { if (FLSVerifyIndexTable) {
verifyIndexedFreeList(size); verifyIndexedFreeList(size);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -407,6 +407,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void save_sweep_limit() { void save_sweep_limit() {
_sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
unallocated_block() : end(); unallocated_block() : end();
if (CMSTraceSweeper) {
gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
" for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
_sweep_limit, bottom(), end());
}
} }
NOT_PRODUCT( NOT_PRODUCT(
void clear_sweep_limit() { _sweep_limit = NULL; } void clear_sweep_limit() { _sweep_limit = NULL; }

View file

@ -2716,6 +2716,10 @@ void CMSCollector::gc_epilogue(bool full) {
bitMapLock()->unlock(); bitMapLock()->unlock();
releaseFreelistLocks(); releaseFreelistLocks();
if (!CleanChunkPoolAsync) {
Chunk::clean_chunk_pool();
}
_between_prologue_and_epilogue = false; // ready for next cycle _between_prologue_and_epilogue = false; // ready for next cycle
} }
@ -7888,40 +7892,45 @@ SweepClosure::SweepClosure(CMSCollector* collector,
assert(_limit >= _sp->bottom() && _limit <= _sp->end(), assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds"); "sweep _limit out of bounds");
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print("\n====================\nStarting new sweep\n"); gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
_limit);
} }
} }
// We need this destructor to reclaim any space at the end void SweepClosure::print_on(outputStream* st) const {
// of the space, which do_blk below may not yet have added back to tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
// the free lists. _sp->bottom(), _sp->end());
tty->print_cr("_limit = " PTR_FORMAT, _limit);
tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
_inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
}
#ifndef PRODUCT
// Assertion checking only: no useful work in product mode --
// however, if any of the flags below become product flags,
// you may need to review this code to see if it needs to be
// enabled in product mode.
SweepClosure::~SweepClosure() { SweepClosure::~SweepClosure() {
assert_lock_strong(_freelistLock); assert_lock_strong(_freelistLock);
assert(_limit >= _sp->bottom() && _limit <= _sp->end(), assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds"); "sweep _limit out of bounds");
// Flush any remaining coterminal free run as a single
// coalesced chunk to the appropriate free list.
if (inFreeRange()) { if (inFreeRange()) {
assert(freeFinger() < _limit, "freeFinger points too high"); warning("inFreeRange() should have been reset; dumping state of SweepClosure");
flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger())); print();
if (CMSTraceSweeper) { ShouldNotReachHere();
gclog_or_tty->print("Sweep: last chunk: ");
gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
} }
} // else nothing to flush
NOT_PRODUCT(
if (Verbose && PrintGC) { if (Verbose && PrintGC) {
gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
SIZE_FORMAT " bytes",
_numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, " gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
SIZE_FORMAT" bytes " SIZE_FORMAT" bytes "
"Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes", "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
_numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsLive, _numWordsLive*sizeof(HeapWord),
_numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
sizeof(HeapWord); * sizeof(HeapWord);
gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes); gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
if (PrintCMSStatistics && CMSVerifyReturnedBytes) { if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
@ -7935,13 +7944,12 @@ SweepClosure::~SweepClosure() {
dictReturnedBytes); dictReturnedBytes);
} }
} }
)
// Now, in debug mode, just null out the sweep_limit
NOT_PRODUCT(_sp->clear_sweep_limit();)
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print("end of sweep\n================\n"); gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
_limit);
} }
} }
#endif // PRODUCT
void SweepClosure::initialize_free_range(HeapWord* freeFinger, void SweepClosure::initialize_free_range(HeapWord* freeFinger,
bool freeRangeInFreeLists) { bool freeRangeInFreeLists) {
@ -8001,15 +8009,17 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// we started the sweep, it may no longer be one because heap expansion // we started the sweep, it may no longer be one because heap expansion
// may have caused us to coalesce the block ending at the address _limit // may have caused us to coalesce the block ending at the address _limit
// with a newly expanded chunk (this happens when _limit was set to the // with a newly expanded chunk (this happens when _limit was set to the
// previous _end of the space), so we may have stepped past _limit; see CR 6977970. // previous _end of the space), so we may have stepped past _limit:
// see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
if (addr >= _limit) { // we have swept up to or past the limit: finish up if (addr >= _limit) { // we have swept up to or past the limit: finish up
assert(_limit >= _sp->bottom() && _limit <= _sp->end(), assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds"); "sweep _limit out of bounds");
assert(addr < _sp->end(), "addr out of bounds"); assert(addr < _sp->end(), "addr out of bounds");
// Flush any remaining coterminal free run as a single // Flush any free range we might be holding as a single
// coalesced chunk to the appropriate free list. // coalesced chunk to the appropriate free list.
if (inFreeRange()) { if (inFreeRange()) {
assert(freeFinger() < _limit, "finger points too high"); assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
flush_cur_free_chunk(freeFinger(), flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger())); pointer_delta(addr, freeFinger()));
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
@ -8033,7 +8043,16 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
res = fc->size(); res = fc->size();
do_already_free_chunk(fc); do_already_free_chunk(fc);
debug_only(_sp->verifyFreeLists()); debug_only(_sp->verifyFreeLists());
assert(res == fc->size(), "Don't expect the size to change"); // If we flush the chunk at hand in lookahead_and_flush()
// and it's coalesced with a preceding chunk, then the
// process of "mangling" the payload of the coalesced block
// will cause erasure of the size information from the
// (erstwhile) header of all the coalesced blocks but the
// first, so the first disjunct in the assert will not hold
// in that specific case (in which case the second disjunct
// will hold).
assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
"Otherwise the size info doesn't change at this step");
NOT_PRODUCT( NOT_PRODUCT(
_numObjectsAlreadyFree++; _numObjectsAlreadyFree++;
_numWordsAlreadyFree += res; _numWordsAlreadyFree += res;
@ -8103,7 +8122,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// //
void SweepClosure::do_already_free_chunk(FreeChunk* fc) { void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
size_t size = fc->size(); const size_t size = fc->size();
// Chunks that cannot be coalesced are not in the // Chunks that cannot be coalesced are not in the
// free lists. // free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) { if (CMSTestInFreeList && !fc->cantCoalesce()) {
@ -8112,7 +8131,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
} }
// a chunk that is already free, should not have been // a chunk that is already free, should not have been
// marked in the bit map // marked in the bit map
HeapWord* addr = (HeapWord*) fc; HeapWord* const addr = (HeapWord*) fc;
assert(!_bitMap->isMarked(addr), "free chunk should be unmarked"); assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
// Verify that the bit map has no bits marked between // Verify that the bit map has no bits marked between
// addr and purported end of this block. // addr and purported end of this block.
@ -8149,7 +8168,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
} }
} else { } else {
// the midst of a free range, we are coalescing // the midst of a free range, we are coalescing
debug_only(record_free_block_coalesced(fc);) print_free_block_coalesced(fc);
if (CMSTraceSweeper) { if (CMSTraceSweeper) {
gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size); gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
} }
@ -8173,6 +8192,10 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
} }
} }
} }
// Note that if the chunk is not coalescable (the else arm
// below), we unconditionally flush, without needing to do
// a "lookahead," as we do below.
if (inFreeRange()) lookahead_and_flush(fc, size);
} else { } else {
// Code path common to both original and adaptive free lists. // Code path common to both original and adaptive free lists.
@ -8191,8 +8214,8 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
// This is a chunk of garbage. It is not in any free list. // This is a chunk of garbage. It is not in any free list.
// Add it to a free list or let it possibly be coalesced into // Add it to a free list or let it possibly be coalesced into
// a larger chunk. // a larger chunk.
HeapWord* addr = (HeapWord*) fc; HeapWord* const addr = (HeapWord*) fc;
size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
if (_sp->adaptive_freelists()) { if (_sp->adaptive_freelists()) {
// Verify that the bit map has no bits marked between // Verify that the bit map has no bits marked between
@ -8205,7 +8228,6 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
// start of a new free range // start of a new free range
assert(size > 0, "A free range should have a size"); assert(size > 0, "A free range should have a size");
initialize_free_range(addr, false); initialize_free_range(addr, false);
} else { } else {
// this will be swept up when we hit the end of the // this will be swept up when we hit the end of the
// free range // free range
@ -8235,6 +8257,9 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
// addr and purported end of just dead object. // addr and purported end of just dead object.
_bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
} }
assert(_limit >= addr + size,
"A freshly garbage chunk can't possibly straddle over _limit");
if (inFreeRange()) lookahead_and_flush(fc, size);
return size; return size;
} }
@ -8284,8 +8309,8 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
(!_collector->should_unload_classes() (!_collector->should_unload_classes()
|| oop(addr)->is_parsable()), || oop(addr)->is_parsable()),
"Should be an initialized object"); "Should be an initialized object");
// Note that there are objects used during class redefinition // Note that there are objects used during class redefinition,
// (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite() // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
// which are discarded with their is_conc_safe state still // which are discarded with their is_conc_safe state still
// false. These object may be floating garbage so may be // false. These object may be floating garbage so may be
// seen here. If they are floating garbage their size // seen here. If they are floating garbage their size
@ -8307,7 +8332,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) { size_t chunkSize) {
// do_post_free_or_garbage_chunk() should only be called in the case // do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator. // of the adaptive free list allocator.
bool fcInFreeLists = fc->isFree(); const bool fcInFreeLists = fc->isFree();
assert(_sp->adaptive_freelists(), "Should only be used in this case."); assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant"); assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) { if (CMSTestInFreeList && fcInFreeLists) {
@ -8318,11 +8343,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize); gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
} }
HeapWord* addr = (HeapWord*) fc; HeapWord* const fc_addr = (HeapWord*) fc;
bool coalesce; bool coalesce;
size_t left = pointer_delta(addr, freeFinger()); const size_t left = pointer_delta(fc_addr, freeFinger());
size_t right = chunkSize; const size_t right = chunkSize;
switch (FLSCoalescePolicy) { switch (FLSCoalescePolicy) {
// numeric value forms a coalition aggressiveness metric // numeric value forms a coalition aggressiveness metric
case 0: { // never coalesce case 0: { // never coalesce
@ -8355,15 +8380,15 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
// If the chunk is in a free range and either we decided to coalesce above // If the chunk is in a free range and either we decided to coalesce above
// or the chunk is near the large block at the end of the heap // or the chunk is near the large block at the end of the heap
// (isNearLargestChunk() returns true), then coalesce this chunk. // (isNearLargestChunk() returns true), then coalesce this chunk.
bool doCoalesce = inFreeRange() && const bool doCoalesce = inFreeRange()
(coalesce || _g->isNearLargestChunk((HeapWord*)fc)); && (coalesce || _g->isNearLargestChunk(fc_addr));
if (doCoalesce) { if (doCoalesce) {
// Coalesce the current free range on the left with the new // Coalesce the current free range on the left with the new
// chunk on the right. If either is on a free list, // chunk on the right. If either is on a free list,
// it must be removed from the list and stashed in the closure. // it must be removed from the list and stashed in the closure.
if (freeRangeInFreeLists()) { if (freeRangeInFreeLists()) {
FreeChunk* ffc = (FreeChunk*)freeFinger(); FreeChunk* const ffc = (FreeChunk*)freeFinger();
assert(ffc->size() == pointer_delta(addr, freeFinger()), assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size."); "Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) { if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc), assert(_sp->verifyChunkInFreeLists(ffc),
@ -8380,13 +8405,14 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
_sp->removeFreeChunkFromFreeLists(fc); _sp->removeFreeChunkFromFreeLists(fc);
} }
set_lastFreeRangeCoalesced(true); set_lastFreeRangeCoalesced(true);
print_free_block_coalesced(fc);
} else { // not in a free range and/or should not coalesce } else { // not in a free range and/or should not coalesce
// Return the current free range and start a new one. // Return the current free range and start a new one.
if (inFreeRange()) { if (inFreeRange()) {
// In a free range but cannot coalesce with the right hand chunk. // In a free range but cannot coalesce with the right hand chunk.
// Put the current free range into the free lists. // Put the current free range into the free lists.
flush_cur_free_chunk(freeFinger(), flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger())); pointer_delta(fc_addr, freeFinger()));
} }
// Set up for new free range. Pass along whether the right hand // Set up for new free range. Pass along whether the right hand
// chunk is in the free lists. // chunk is in the free lists.
@ -8394,6 +8420,42 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
} }
} }
// Lookahead flush:
// If we are tracking a free range, and this is the last chunk that
// we'll look at because its end crosses past _limit, we'll preemptively
// flush it along with any free range we may be holding on to. Note that
// this can be the case only for an already free or freshly garbage
// chunk. If this block is an object, it can never straddle
// over _limit. The "straddling" occurs when _limit is set at
// the previous end of the space when this cycle started, and
// a subsequent heap expansion caused the previously co-terminal
// free block to be coalesced with the newly expanded portion,
// thus rendering _limit a non-block-boundary making it dangerous
// for the sweeper to step over and examine.
void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
assert(inFreeRange(), "Should only be called if currently in a free range.");
HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
assert(_sp->used_region().contains(eob - 1),
err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
_limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
"[" PTR_FORMAT "," PTR_FORMAT ") in space "
"[" PTR_FORMAT "," PTR_FORMAT ")",
_limit, fc, eob, _sp->bottom(), _sp->end());
}
// Return the storage we are tracking back into the free lists.
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("Flushing ... ");
}
assert(freeFinger() < eob, "Error");
flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
}
}
void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) { void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
assert(inFreeRange(), "Should only be called if currently in a free range."); assert(inFreeRange(), "Should only be called if currently in a free range.");
assert(size > 0, assert(size > 0,
@ -8419,6 +8481,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
} }
_sp->addChunkAndRepairOffsetTable(chunk, size, _sp->addChunkAndRepairOffsetTable(chunk, size,
lastFreeRangeCoalesced()); lastFreeRangeCoalesced());
} else if (CMSTraceSweeper) {
gclog_or_tty->print_cr("Already in free list: nothing to flush");
} }
set_inFreeRange(false); set_inFreeRange(false);
set_freeRangeInFreeLists(false); set_freeRangeInFreeLists(false);
@ -8477,14 +8541,15 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
bool debug_verifyChunkInFreeLists(FreeChunk* fc) { bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
return debug_cms_space->verifyChunkInFreeLists(fc); return debug_cms_space->verifyChunkInFreeLists(fc);
} }
void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
if (CMSTraceSweeper) {
gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
}
}
#endif #endif
void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
fc, fc->size());
}
}
// CMSIsAliveClosure // CMSIsAliveClosure
bool CMSIsAliveClosure::do_object_b(oop obj) { bool CMSIsAliveClosure::do_object_b(oop obj) {
HeapWord* addr = (HeapWord*)obj; HeapWord* addr = (HeapWord*)obj;

View file

@ -1701,9 +1701,9 @@ class SweepClosure: public BlkClosureCareful {
CMSCollector* _collector; // collector doing the work CMSCollector* _collector; // collector doing the work
ConcurrentMarkSweepGeneration* _g; // Generation being swept ConcurrentMarkSweepGeneration* _g; // Generation being swept
CompactibleFreeListSpace* _sp; // Space being swept CompactibleFreeListSpace* _sp; // Space being swept
HeapWord* _limit;// the address at which the sweep should stop because HeapWord* _limit;// the address at or above which the sweep should stop
// we do not expect blocks eligible for sweeping past // because we do not expect newly garbage blocks
// that address. // eligible for sweeping past that address.
Mutex* _freelistLock; // Free list lock (in space) Mutex* _freelistLock; // Free list lock (in space)
CMSBitMap* _bitMap; // Marking bit map (in CMSBitMap* _bitMap; // Marking bit map (in
// generation) // generation)
@ -1750,6 +1750,10 @@ class SweepClosure: public BlkClosureCareful {
void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize); void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
// Process a free chunk during sweeping. // Process a free chunk during sweeping.
void do_already_free_chunk(FreeChunk *fc); void do_already_free_chunk(FreeChunk *fc);
// Work method called when processing an already free or a
// freshly garbage chunk to do a lookahead and possibly a
// premptive flush if crossing over _limit.
void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
// Process a garbage chunk during sweeping. // Process a garbage chunk during sweeping.
size_t do_garbage_chunk(FreeChunk *fc); size_t do_garbage_chunk(FreeChunk *fc);
// Process a live chunk during sweeping. // Process a live chunk during sweeping.
@ -1758,8 +1762,6 @@ class SweepClosure: public BlkClosureCareful {
// Accessors. // Accessors.
HeapWord* freeFinger() const { return _freeFinger; } HeapWord* freeFinger() const { return _freeFinger; }
void set_freeFinger(HeapWord* v) { _freeFinger = v; } void set_freeFinger(HeapWord* v) { _freeFinger = v; }
size_t freeRangeSize() const { return _freeRangeSize; }
void set_freeRangeSize(size_t v) { _freeRangeSize = v; }
bool inFreeRange() const { return _inFreeRange; } bool inFreeRange() const { return _inFreeRange; }
void set_inFreeRange(bool v) { _inFreeRange = v; } void set_inFreeRange(bool v) { _inFreeRange = v; }
bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
@ -1779,14 +1781,16 @@ class SweepClosure: public BlkClosureCareful {
void do_yield_work(HeapWord* addr); void do_yield_work(HeapWord* addr);
// Debugging/Printing // Debugging/Printing
void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN; void print_free_block_coalesced(FreeChunk* fc) const;
public: public:
SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
CMSBitMap* bitMap, bool should_yield); CMSBitMap* bitMap, bool should_yield);
~SweepClosure(); ~SweepClosure() PRODUCT_RETURN;
size_t do_blk_careful(HeapWord* addr); size_t do_blk_careful(HeapWord* addr);
void print() const { print_on(tty); }
void print_on(outputStream *st) const;
}; };
// Closures related to weak references processing // Closures related to weak references processing

View file

@ -114,17 +114,11 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
linkNext(ptr); linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this); if (ptr != NULL) ptr->linkPrev(this);
} }
void linkAfterNonNull(FreeChunk* ptr) {
assert(ptr != NULL, "precondition violation");
linkNext(ptr);
ptr->linkPrev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; } void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) { void linkPrev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else) LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1); _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
} }
void clearPrev() { _prev = NULL; }
void clearNext() { _next = NULL; } void clearNext() { _next = NULL; }
void markNotFree() { void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below // Set _prev (klass) to null before (if) clearing the mark word below

View file

@ -300,8 +300,21 @@ void FreeList::verify_stats() const {
// dictionary for example, this might be the first block and // dictionary for example, this might be the first block and
// in that case there would be no place that we could record // in that case there would be no place that we could record
// the stats (which are kept in the block itself). // the stats (which are kept in the block itself).
assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1 assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
>= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle"); + _allocation_stats.coalBirths() + 1) // Total Production Stock + 1
>= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prevSweep(" SIZE_FORMAT ")"
" + splitBirths(" SIZE_FORMAT ")"
" + coalBirths(" SIZE_FORMAT ") + 1 >= "
" splitDeaths(" SIZE_FORMAT ")"
" coalDeaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
_allocation_stats.splitBirths(), _allocation_stats.splitDeaths(),
_allocation_stats.coalDeaths(), count()));
} }
void FreeList::assert_proper_lock_protection_work() const { void FreeList::assert_proper_lock_protection_work() const {

File diff suppressed because it is too large Load diff

View file

@ -131,22 +131,22 @@ class CMBitMap : public CMBitMapRO {
void mark(HeapWord* addr) { void mark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?"); "outside underlying space?");
_bm.at_put(heapWordToOffset(addr), true); _bm.set_bit(heapWordToOffset(addr));
} }
void clear(HeapWord* addr) { void clear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?"); "outside underlying space?");
_bm.at_put(heapWordToOffset(addr), false); _bm.clear_bit(heapWordToOffset(addr));
} }
bool parMark(HeapWord* addr) { bool parMark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?"); "outside underlying space?");
return _bm.par_at_put(heapWordToOffset(addr), true); return _bm.par_set_bit(heapWordToOffset(addr));
} }
bool parClear(HeapWord* addr) { bool parClear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?"); "outside underlying space?");
return _bm.par_at_put(heapWordToOffset(addr), false); return _bm.par_clear_bit(heapWordToOffset(addr));
} }
void markRange(MemRegion mr); void markRange(MemRegion mr);
void clearAll(); void clearAll();
@ -736,12 +736,14 @@ public:
// will dump the contents of its reference fields, as well as // will dump the contents of its reference fields, as well as
// liveness information for the object and its referents. The dump // liveness information for the object and its referents. The dump
// will be written to a file with the following name: // will be written to a file with the following name:
// G1PrintReachableBaseFile + "." + str. use_prev_marking decides // G1PrintReachableBaseFile + "." + str.
// whether the prev (use_prev_marking == true) or next // vo decides whether the prev (vo == UsePrevMarking), the next
// (use_prev_marking == false) marking information will be used to // (vo == UseNextMarking) marking information, or the mark word
// determine the liveness of each object / referent. If all is true, // (vo == UseMarkWord) will be used to determine the liveness of
// all objects in the heap will be dumped, otherwise only the live // each object / referent.
// ones. In the dump the following symbols / abbreviations are used: // If all is true, all objects in the heap will be dumped, otherwise
// only the live ones. In the dump the following symbols / breviations
// are used:
// M : an explicitly live object (its bitmap bit is set) // M : an explicitly live object (its bitmap bit is set)
// > : an implicitly live object (over tams) // > : an implicitly live object (over tams)
// O : an object outside the G1 heap (typically: in the perm gen) // O : an object outside the G1 heap (typically: in the perm gen)
@ -749,7 +751,7 @@ public:
// AND MARKED : indicates that an object is both explicitly and // AND MARKED : indicates that an object is both explicitly and
// implicitly live (it should be one or the other, not both) // implicitly live (it should be one or the other, not both)
void print_reachable(const char* str, void print_reachable(const char* str,
bool use_prev_marking, bool all) PRODUCT_RETURN; VerifyOption vo, bool all) PRODUCT_RETURN;
// Clear the next marking bitmap (will be called concurrently). // Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap(); void clearNextBitmap();
@ -809,10 +811,19 @@ public:
// It indicates that a new collection set is being chosen. // It indicates that a new collection set is being chosen.
void newCSet(); void newCSet();
// It registers a collection set heap region with CM. This is used // It registers a collection set heap region with CM. This is used
// to determine whether any heap regions are located above the finger. // to determine whether any heap regions are located above the finger.
void registerCSetRegion(HeapRegion* hr); void registerCSetRegion(HeapRegion* hr);
// Resets the region fields of any active CMTask whose region fields
// are in the collection set (i.e. the region currently claimed by
// the CMTask will be evacuated and may be used, subsequently, as
// an alloc region). When this happens the region fields in the CMTask
// are stale and, hence, should be cleared causing the worker thread
// to claim a new region.
void reset_active_task_region_fields_in_cset();
// Registers the maximum region-end associated with a set of // Registers the maximum region-end associated with a set of
// regions with CM. Again this is used to determine whether any // regions with CM. Again this is used to determine whether any
// heap regions are located above the finger. // heap regions are located above the finger.
@ -822,9 +833,10 @@ public:
// _min_finger then we need to gray objects. // _min_finger then we need to gray objects.
// This routine is like registerCSetRegion but for an entire // This routine is like registerCSetRegion but for an entire
// collection of regions. // collection of regions.
if (max_finger > _min_finger) if (max_finger > _min_finger) {
_should_gray_objects = true; _should_gray_objects = true;
} }
}
// Returns "true" if at least one mark has been completed. // Returns "true" if at least one mark has been completed.
bool at_least_one_mark_complete() { return _at_least_one_mark_complete; } bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
@ -869,14 +881,18 @@ public:
// The following indicate whether a given verbose level has been // The following indicate whether a given verbose level has been
// set. Notice that anything above stats is conditional to // set. Notice that anything above stats is conditional to
// _MARKING_VERBOSE_ having been set to 1 // _MARKING_VERBOSE_ having been set to 1
bool verbose_stats() bool verbose_stats() {
{ return _verbose_level >= stats_verbose; } return _verbose_level >= stats_verbose;
bool verbose_low() }
{ return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; } bool verbose_low() {
bool verbose_medium() return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
{ return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; } }
bool verbose_high() bool verbose_medium() {
{ return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; } return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
}
bool verbose_high() {
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
}
}; };
// A class representing a marking task. // A class representing a marking task.
@ -919,7 +935,7 @@ private:
double _start_time_ms; double _start_time_ms;
// the oop closure used for iterations over oops // the oop closure used for iterations over oops
OopClosure* _oop_closure; G1CMOopClosure* _cm_oop_closure;
// the region this task is scanning, NULL if we're not scanning any // the region this task is scanning, NULL if we're not scanning any
HeapRegion* _curr_region; HeapRegion* _curr_region;
@ -1039,9 +1055,6 @@ private:
void setup_for_region(HeapRegion* hr); void setup_for_region(HeapRegion* hr);
// it brings up-to-date the limit of the region // it brings up-to-date the limit of the region
void update_region_limit(); void update_region_limit();
// it resets the local fields after a task has finished scanning a
// region
void giveup_current_region();
// called when either the words scanned or the refs visited limit // called when either the words scanned or the refs visited limit
// has been reached // has been reached
@ -1055,9 +1068,10 @@ private:
// respective limit and calls reached_limit() if they have // respective limit and calls reached_limit() if they have
void check_limits() { void check_limits() {
if (_words_scanned >= _words_scanned_limit || if (_words_scanned >= _words_scanned_limit ||
_refs_reached >= _refs_reached_limit) _refs_reached >= _refs_reached_limit) {
reached_limit(); reached_limit();
} }
}
// this is supposed to be called regularly during a marking step as // this is supposed to be called regularly during a marking step as
// it checks a bunch of conditions that might cause the marking step // it checks a bunch of conditions that might cause the marking step
// to abort // to abort
@ -1094,6 +1108,11 @@ public:
// exit the termination protocol after it's entered it. // exit the termination protocol after it's entered it.
virtual bool should_exit_termination(); virtual bool should_exit_termination();
// Resets the local region fields after a task has finished scanning a
// region; or when they have become stale as a result of the region
// being evacuated.
void giveup_current_region();
HeapWord* finger() { return _finger; } HeapWord* finger() { return _finger; }
bool has_aborted() { return _has_aborted; } bool has_aborted() { return _has_aborted; }
@ -1111,32 +1130,17 @@ public:
// Clears any recorded partially scanned region // Clears any recorded partially scanned region
void clear_aborted_region() { set_aborted_region(MemRegion()); } void clear_aborted_region() { set_aborted_region(MemRegion()); }
void set_oop_closure(OopClosure* oop_closure) { void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
_oop_closure = oop_closure;
}
// It grays the object by marking it and, if necessary, pushing it // It grays the object by marking it and, if necessary, pushing it
// on the local queue // on the local queue
void deal_with_reference(oop obj); inline void deal_with_reference(oop obj);
// It scans an object and visits its children. // It scans an object and visits its children.
void scan_object(oop obj) { void scan_object(oop obj);
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high())
gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
_task_id, (void*) obj);
size_t obj_size = obj->size();
_words_scanned += obj_size;
obj->oop_iterate(_oop_closure);
statsOnly( ++_objs_scanned );
check_limits();
}
// It pushes an object on the local queue. // It pushes an object on the local queue.
void push(oop obj); inline void push(oop obj);
// These two move entries to/from the global stack. // These two move entries to/from the global stack.
void move_entries_to_global_stack(); void move_entries_to_global_stack();

View file

@ -0,0 +1,156 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
inline void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
}
if (!_task_queue->push(obj)) {
// The local task queue looks full. We need to push some entries
// to the global stack.
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%d] task queue overflow, "
"moving entries to the global stack",
_task_id);
}
move_entries_to_global_stack();
// this should succeed since, even if we overflow the global
// stack, we should have definitely removed some entries from the
// local queue. So, there must be space on it.
bool success = _task_queue->push(obj);
assert(success, "invariant");
}
statsOnly( int tmp_size = _task_queue->size();
if (tmp_size > _local_max_size) {
_local_max_size = tmp_size;
}
++_local_pushes );
}
// This determines whether the method below will check both the local
// and global fingers when determining whether to push on the stack a
// gray object (value 1) or whether it will only check the global one
// (value 0). The tradeoffs are that the former will be a bit more
// accurate and possibly push less on the stack, but it might also be
// a little bit slower.
#define _CHECK_BOTH_FINGERS_ 1
inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
_task_id, (void*) obj);
}
++_refs_reached;
HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
if (_g1h->is_in_g1_reserved(objAddr)) {
assert(obj != NULL, "null check is implicit");
if (!_nextMarkBitMap->isMarked(objAddr)) {
// Only get the containing region if the object is not marked on the
// bitmap (otherwise, it's a waste of time since we won't do
// anything with it).
HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
if (!hr->obj_allocated_since_next_marking(obj)) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
_task_id, (void*) obj);
}
// we need to mark it first
if (_nextMarkBitMap->parMark(objAddr)) {
// No OrderAccess:store_load() is needed. It is implicit in the
// CAS done in parMark(objAddr) above
HeapWord* global_finger = _cm->finger();
#if _CHECK_BOTH_FINGERS_
// we will check both the local and global fingers
if (_finger != NULL && objAddr < _finger) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
"pushing it", _task_id, _finger);
}
push(obj);
} else if (_curr_region != NULL && objAddr < _region_limit) {
// do nothing
} else if (objAddr < global_finger) {
// Notice that the global finger might be moving forward
// concurrently. This is not a problem. In the worst case, we
// mark the object while it is above the global finger and, by
// the time we read the global finger, it has moved forward
// passed this object. In this case, the object will probably
// be visited when a task is scanning the region and will also
// be pushed on the stack. So, some duplicate work, but no
// correctness problems.
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the global finger "
"("PTR_FORMAT"), pushing it",
_task_id, global_finger);
}
push(obj);
} else {
// do nothing
}
#else // _CHECK_BOTH_FINGERS_
// we will only check the global finger
if (objAddr < global_finger) {
// see long comment above
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%d] below the global finger "
"("PTR_FORMAT"), pushing it",
_task_id, global_finger);
}
push(obj);
}
#endif // _CHECK_BOTH_FINGERS_
}
}
}
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP

File diff suppressed because it is too large Load diff

View file

@ -27,8 +27,10 @@
#include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp" #include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp" #include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp" #include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
@ -42,7 +44,6 @@
// heap subsets that will yield large amounts of garbage. // heap subsets that will yield large amounts of garbage.
class HeapRegion; class HeapRegion;
class HeapRegionSeq;
class HRRSCleanupTask; class HRRSCleanupTask;
class PermanentGenerationSpec; class PermanentGenerationSpec;
class GenerationSpec; class GenerationSpec;
@ -103,6 +104,19 @@ public:
size_t length() { return _length; } size_t length() { return _length; }
size_t survivor_length() { return _survivor_length; } size_t survivor_length() { return _survivor_length; }
// Currently we do not keep track of the used byte sum for the
// young list and the survivors and it'd be quite a lot of work to
// do so. When we'll eventually replace the young list with
// instances of HeapRegionLinkedList we'll get that for free. So,
// we'll report the more accurate information then.
size_t eden_used_bytes() {
assert(length() >= survivor_length(), "invariant");
return (length() - survivor_length()) * HeapRegion::GrainBytes;
}
size_t survivor_used_bytes() {
return survivor_length() * HeapRegion::GrainBytes;
}
void rs_length_sampling_init(); void rs_length_sampling_init();
bool rs_length_sampling_more(); bool rs_length_sampling_more();
void rs_length_sampling_next(); void rs_length_sampling_next();
@ -183,9 +197,6 @@ private:
// The part of _g1_storage that is currently committed. // The part of _g1_storage that is currently committed.
MemRegion _g1_committed; MemRegion _g1_committed;
// The maximum part of _g1_storage that has ever been committed.
MemRegion _g1_max_committed;
// The master free list. It will satisfy all new region allocations. // The master free list. It will satisfy all new region allocations.
MasterFreeRegionList _free_list; MasterFreeRegionList _free_list;
@ -209,7 +220,7 @@ private:
void rebuild_region_lists(); void rebuild_region_lists();
// The sequence of all heap regions in the heap. // The sequence of all heap regions in the heap.
HeapRegionSeq* _hrs; HeapRegionSeq _hrs;
// Alloc region used to satisfy mutator allocation requests. // Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region; MutatorAllocRegion _mutator_alloc_region;
@ -288,6 +299,8 @@ private:
size_t* _surviving_young_words; size_t* _surviving_young_words;
G1HRPrinter _hr_printer;
void setup_surviving_young_words(); void setup_surviving_young_words();
void update_surviving_young_words(size_t* surv_young_words); void update_surviving_young_words(size_t* surv_young_words);
void cleanup_surviving_young_words(); void cleanup_surviving_young_words();
@ -408,13 +421,15 @@ protected:
// Attempt to satisfy a humongous allocation request of the given // Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions // size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the // length and remove them from the master free list. Return the
// index of the first region or -1 if the search was unsuccessful. // index of the first region or G1_NULL_HRS_INDEX if the search
int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); // was unsuccessful.
size_t humongous_obj_allocate_find_first(size_t num_regions,
size_t word_size);
// Initialize a contiguous set of free regions of length num_regions // Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single // and starting at index first so that they appear as a single
// humongous region. // humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(int first, HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
size_t num_regions, size_t num_regions,
size_t word_size); size_t word_size);
@ -434,8 +449,7 @@ protected:
// * All allocation requests for new TLABs should go to // * All allocation requests for new TLABs should go to
// allocate_new_tlab(). // allocate_new_tlab().
// //
// * All non-TLAB allocation requests should go to mem_allocate() // * All non-TLAB allocation requests should go to mem_allocate().
// and mem_allocate() should never be called with is_tlab == true.
// //
// * If either call cannot satisfy the allocation request using the // * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If // current allocating region, they will try to get a new one. If
@ -455,8 +469,6 @@ protected:
virtual HeapWord* allocate_new_tlab(size_t word_size); virtual HeapWord* allocate_new_tlab(size_t word_size);
virtual HeapWord* mem_allocate(size_t word_size, virtual HeapWord* mem_allocate(size_t word_size,
bool is_noref,
bool is_tlab, /* expected to be false */
bool* gc_overhead_limit_was_exceeded); bool* gc_overhead_limit_was_exceeded);
// The following three methods take a gc_count_before_ret // The following three methods take a gc_count_before_ret
@ -574,8 +586,8 @@ public:
void register_region_with_in_cset_fast_test(HeapRegion* r) { void register_region_with_in_cset_fast_test(HeapRegion* r) {
assert(_in_cset_fast_test_base != NULL, "sanity"); assert(_in_cset_fast_test_base != NULL, "sanity");
assert(r->in_collection_set(), "invariant"); assert(r->in_collection_set(), "invariant");
int index = r->hrs_index(); size_t index = r->hrs_index();
assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant"); assert(index < _in_cset_fast_test_length, "invariant");
assert(!_in_cset_fast_test_base[index], "invariant"); assert(!_in_cset_fast_test_base[index], "invariant");
_in_cset_fast_test_base[index] = true; _in_cset_fast_test_base[index] = true;
} }
@ -626,6 +638,8 @@ public:
return _full_collections_completed; return _full_collections_completed;
} }
G1HRPrinter* hr_printer() { return &_hr_printer; }
protected: protected:
// Shrink the garbage-first heap by at most the given size (in bytes!). // Shrink the garbage-first heap by at most the given size (in bytes!).
@ -741,6 +755,11 @@ protected:
HumongousRegionSet* humongous_proxy_set, HumongousRegionSet* humongous_proxy_set,
bool par); bool par);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
// after _g1_storage is updated.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
// The concurrent marker (and the thread it runs in.) // The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm; ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread; ConcurrentMarkThread* _cmThread;
@ -803,7 +822,6 @@ protected:
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m); void handle_evacuation_failure_common(oop obj, markOop m);
// Ensure that the relevant gc_alloc regions are set. // Ensure that the relevant gc_alloc regions are set.
void get_gc_alloc_regions(); void get_gc_alloc_regions();
// We're done with GC alloc regions. We are going to tear down the // We're done with GC alloc regions. We are going to tear down the
@ -954,15 +972,13 @@ public:
} }
// The total number of regions in the heap. // The total number of regions in the heap.
size_t n_regions(); size_t n_regions() { return _hrs.length(); }
// The max number of regions in the heap.
size_t max_regions() { return _hrs.max_length(); }
// The number of regions that are completely free. // The number of regions that are completely free.
size_t max_regions(); size_t free_regions() { return _free_list.length(); }
// The number of regions that are completely free.
size_t free_regions() {
return _free_list.length();
}
// The number of regions that are not completely free. // The number of regions that are not completely free.
size_t used_regions() { return n_regions() - free_regions(); } size_t used_regions() { return n_regions() - free_regions(); }
@ -970,6 +986,10 @@ public:
// The number of regions available for "regular" expansion. // The number of regions available for "regular" expansion.
size_t expansion_regions() { return _expansion_regions; } size_t expansion_regions() { return _expansion_regions; }
// Factory method for HeapRegion instances. It will return NULL if
// the allocation fails.
HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
@ -1131,17 +1151,15 @@ public:
// Iterate over heap regions, in address order, terminating the // Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true". // iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk); void heap_region_iterate(HeapRegionClosure* blk) const;
// Iterate over heap regions starting with r (or the first region if "r" // Iterate over heap regions starting with r (or the first region if "r"
// is NULL), in address order, terminating early if the "doHeapRegion" // is NULL), in address order, terminating early if the "doHeapRegion"
// method returns "true". // method returns "true".
void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
// As above but starting from the region at index idx. // Return the region with the given index. It assumes the index is valid.
void heap_region_iterate_from(int idx, HeapRegionClosure* blk); HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
HeapRegion* region_at(size_t idx);
// Divide the heap region sequence into "chunks" of some size (the number // Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some // of regions divided by the number of parallel threads times some
@ -1182,12 +1200,14 @@ public:
// A G1CollectedHeap will contain some number of heap regions. This // A G1CollectedHeap will contain some number of heap regions. This
// finds the region containing a given address, or else returns NULL. // finds the region containing a given address, or else returns NULL.
HeapRegion* heap_region_containing(const void* addr) const; template <class T>
inline HeapRegion* heap_region_containing(const T addr) const;
// Like the above, but requires "addr" to be in the heap (to avoid a // Like the above, but requires "addr" to be in the heap (to avoid a
// null-check), and unlike the above, may return an continuing humongous // null-check), and unlike the above, may return an continuing humongous
// region. // region.
HeapRegion* heap_region_containing_raw(const void* addr) const; template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is, // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly // each address in the (reserved) heap is a member of exactly
@ -1249,11 +1269,17 @@ public:
return true; return true;
} }
bool is_in_young(oop obj) { bool is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj); HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young(); return hr != NULL && hr->is_young();
} }
#ifdef ASSERT
virtual bool is_in_partial_collection(const void* p);
#endif
virtual bool is_scavengable(const void* addr);
// We don't need barriers for initializing stores to objects // We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no // in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set // pre-value that needs to be remembered; for the remembered-set
@ -1280,10 +1306,6 @@ public:
return true; return true;
} }
// The boundary between a "large" and "small" array of primitives, in
// words.
virtual size_t large_typearray_limit();
// Returns "true" iff the given word_size is "very large". // Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) { static bool isHumongous(size_t word_size) {
// Note this has to be strictly greater-than as the TLABs // Note this has to be strictly greater-than as the TLABs
@ -1323,14 +1345,20 @@ public:
// Perform verification. // Perform verification.
// use_prev_marking == true -> use "prev" marking information, // vo == UsePrevMarking -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information // vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be // NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use // consistent most of the time, so most calls to this should use
// use_prev_marking == true. Currently, there is only one case where // vo == UsePrevMarking.
// this is called with use_prev_marking == false, which is to verify // Currently, there is only one case where this is called with
// the "next" marking information at the end of remark. // vo == UseNextMarking, which is to verify the "next" marking
void verify(bool allow_dirty, bool silent, bool use_prev_marking); // information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void verify(bool allow_dirty, bool silent, VerifyOption vo);
// Override; it uses the "prev" marking information // Override; it uses the "prev" marking information
virtual void verify(bool allow_dirty, bool silent); virtual void verify(bool allow_dirty, bool silent);
@ -1349,10 +1377,9 @@ public:
// Override // Override
void print_tracing_info() const; void print_tracing_info() const;
// If "addr" is a pointer into the (reserved?) heap, returns a positive // The following two methods are helpful for debugging RSet issues.
// number indicating the "arena" within the heap in which "addr" falls. void print_cset_rsets() PRODUCT_RETURN;
// Or else returns 0. void print_all_rsets() PRODUCT_RETURN;
virtual int addr_to_arena_id(void* addr) const;
// Convenience function to be used in situations where the heap type can be // Convenience function to be used in situations where the heap type can be
// asserted to be this type. // asserted to be this type.
@ -1383,24 +1410,27 @@ public:
// bitmap off to the side. // bitmap off to the side.
void doConcurrentMark(); void doConcurrentMark();
// This is called from the marksweep collector which then does // Do a full concurrent marking, synchronously.
// a concurrent mark and verifies that the results agree with
// the stop the world marking.
void checkConcurrentMark();
void do_sync_mark(); void do_sync_mark();
bool isMarkedPrev(oop obj) const; bool isMarkedPrev(oop obj) const;
bool isMarkedNext(oop obj) const; bool isMarkedNext(oop obj) const;
// use_prev_marking == true -> use "prev" marking information, // vo == UsePrevMarking -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information // vo == UseNextMarking -> use "next" marking information,
// vo == UseMarkWord -> use mark word from object header
bool is_obj_dead_cond(const oop obj, bool is_obj_dead_cond(const oop obj,
const HeapRegion* hr, const HeapRegion* hr,
const bool use_prev_marking) const { const VerifyOption vo) const {
if (use_prev_marking) {
switch (vo) {
case VerifyOption_G1UsePrevMarking:
return is_obj_dead(obj, hr); return is_obj_dead(obj, hr);
} else { case VerifyOption_G1UseNextMarking:
return is_obj_ill(obj, hr); return is_obj_ill(obj, hr);
default:
assert(vo == VerifyOption_G1UseMarkWord, "must be");
return !obj->is_gc_marked();
} }
} }
@ -1441,18 +1471,24 @@ public:
// Added if it is in permanent gen it isn't dead. // Added if it is in permanent gen it isn't dead.
// Added if it is NULL it isn't dead. // Added if it is NULL it isn't dead.
// use_prev_marking == true -> use "prev" marking information, // vo == UsePrevMarking -> use "prev" marking information,
// use_prev_marking == false -> use "next" marking information // vo == UseNextMarking -> use "next" marking information,
// vo == UseMarkWord -> use mark word from object header
bool is_obj_dead_cond(const oop obj, bool is_obj_dead_cond(const oop obj,
const bool use_prev_marking) { const VerifyOption vo) const {
if (use_prev_marking) {
switch (vo) {
case VerifyOption_G1UsePrevMarking:
return is_obj_dead(obj); return is_obj_dead(obj);
} else { case VerifyOption_G1UseNextMarking:
return is_obj_ill(obj); return is_obj_ill(obj);
default:
assert(vo == VerifyOption_G1UseMarkWord, "must be");
return !obj->is_gc_marked();
} }
} }
bool is_obj_dead(const oop obj) { bool is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj); const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) { if (hr == NULL) {
if (Universe::heap()->is_in_permanent(obj)) if (Universe::heap()->is_in_permanent(obj))
@ -1463,7 +1499,7 @@ public:
else return is_obj_dead(obj, hr); else return is_obj_dead(obj, hr);
} }
bool is_obj_ill(const oop obj) { bool is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj); const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) { if (hr == NULL) {
if (Universe::heap()->is_in_permanent(obj)) if (Universe::heap()->is_in_permanent(obj))

View file

@ -34,9 +34,10 @@
// Inline functions for G1CollectedHeap // Inline functions for G1CollectedHeap
template <class T>
inline HeapRegion* inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const { G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = _hrs->addr_to_region(addr); HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
// hr can be null if addr in perm_gen // hr can be null if addr in perm_gen
if (hr != NULL && hr->continuesHumongous()) { if (hr != NULL && hr->continuesHumongous()) {
hr = hr->humongous_start_region(); hr = hr->humongous_start_region();
@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
return hr; return hr;
} }
template <class T>
inline HeapRegion* inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const { G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(_g1_reserved.contains(addr), "invariant"); assert(_g1_reserved.contains((const void*) addr), "invariant");
size_t index = pointer_delta(addr, _g1_reserved.start(), 1) HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
>> HeapRegion::LogOfHRGrainBytes;
HeapRegion* res = _hrs->at(index);
assert(res == _hrs->addr_to_region(addr), "sanity");
return res; return res;
} }
inline bool G1CollectedHeap::obj_in_cs(oop obj) { inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrs->addr_to_region(obj); HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set(); return r != NULL && r->in_collection_set();
} }

Some files were not shown because too many files have changed in this diff Show more