This commit is contained in:
Karen Kinnear 2010-03-26 11:10:26 -04:00
commit 7aaaad73cf
241 changed files with 6074 additions and 1483 deletions

View file

@ -59,3 +59,4 @@ dcc938ac40cc45f1ef454d76020b5db5d943001c jdk7-b81
a30062be6d9ca1d48579826f870f85974300004e jdk7-b82 a30062be6d9ca1d48579826f870f85974300004e jdk7-b82
34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83 34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83
b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84 b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85

View file

@ -59,3 +59,4 @@ a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82 e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83 6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84 2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85

View file

@ -59,3 +59,4 @@ e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82 1e8c1bfad1abb4b81407a0f2645e0fb85764ca48 jdk7-b82
fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83 fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84 68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85

View file

@ -81,3 +81,5 @@ ac59d4e6dae51ac5fc31a9a4940d1857f91161b1 hs16-b08
fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05 fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83 3f370a32906eb5ba993fabd7b4279be7f31052b9 jdk7-b83
ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84 ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10

View file

@ -174,7 +174,7 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
return NULL; return NULL;
} }
newlib->symtab = build_symtab(newlib->fd); newlib->symtab = build_symtab(newlib->fd, libname);
if (newlib->symtab == NULL) { if (newlib->symtab == NULL) {
print_debug("symbol table build failed for %s\n", newlib->name); print_debug("symbol table build failed for %s\n", newlib->name);
} }

View file

@ -53,8 +53,274 @@ typedef struct symtab {
struct hsearch_data *hash_table; struct hsearch_data *hash_table;
} symtab_t; } symtab_t;
// read symbol table from given fd.
struct symtab* build_symtab(int fd) { // Directory that contains global debuginfo files. In theory it
// should be possible to change this, but in a Java environment there
// is no obvious place to put a user interface to do it. Maybe this
// could be set with an environment variable.
static const char debug_file_directory[] = "/usr/lib/debug";
/* The CRC used in gnu_debuglink, as specified at
   http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files.
   This is the standard reflected CRC-32 (polynomial 0xedb88320).  The
   lookup table is generated on first use rather than spelled out as a
   256-entry literal; the resulting table is identical.  Pass the crc
   returned by a previous call to checksum a file incrementally. */
unsigned int gnu_debuglink_crc32 (unsigned int crc,
                                  unsigned char *buf, size_t len)
{
  static unsigned int crc32_table[256];
  static int table_computed = 0;
  unsigned char *p;

  if (!table_computed) {
    unsigned int n;
    for (n = 0; n < 256; n++) {
      unsigned int c = n;
      int bit;
      for (bit = 0; bit < 8; bit++)
        c = (c & 1) ? (0xedb88320U ^ (c >> 1)) : (c >> 1);
      crc32_table[n] = c;
    }
    table_computed = 1;
  }

  /* Mask to 32 bits in case unsigned int is wider. */
  crc = ~crc & 0xffffffff;
  for (p = buf; p < buf + len; p++)
    crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
  return ~crc & 0xffffffff;
}
/* Open a candidate debuginfo file and checksum its entire contents.
   Returns an open fd (positioned wherever the final read left it) if
   the computed CRC matches the expected one, otherwise -1. */
static int
open_debug_file (const char *pathname, unsigned int crc)
{
  unsigned char block[8 * 1024];
  unsigned int computed = 0;
  int nread;
  int fd = pathmap_open(pathname);

  if (fd < 0)
    return -1;

  /* Checksum the whole file from the beginning. */
  lseek(fd, 0, SEEK_SET);
  while ((nread = read(fd, block, sizeof block)) > 0)
    computed = gnu_debuglink_crc32(computed, block, nread);

  if (computed == crc)
    return fd;

  close(fd);
  return -1;
}
/* Find an ELF section by name.  Loads the section's data into the
   matching scn_cache slot and returns that slot, or NULL if no such
   section exists or its data could not be read. */
static struct elf_section *find_section_by_name(char *name,
                                                int fd,
                                                ELF_EHDR *ehdr,
                                                ELF_SHDR *shbuf,
                                                struct elf_section *scn_cache)
{
  ELF_SHDR* cursct = NULL;
  char *strtab;
  int cnt;

  /* Section names are offsets into the section-header string table
     (index e_shstrndx); make sure it is loaded first. */
  if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
    // BUG FIX: the original passed the still-NULL `cursct` here, so the
    // string table could never be read and every lookup failed; pass the
    // string table's real section header instead.
    if ((scn_cache[ehdr->e_shstrndx].c_data
         = read_section_data(fd, ehdr, &shbuf[ehdr->e_shstrndx])) == NULL) {
      return NULL;
    }
  }

  strtab = scn_cache[ehdr->e_shstrndx].c_data;

  for (cursct = shbuf, cnt = 0;
       cnt < ehdr->e_shnum;
       cnt++, cursct++) {
    /* sh_name is a byte offset into strtab. */
    if (strcmp(cursct->sh_name + strtab, name) == 0) {
      scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
      return &scn_cache[cnt];
    }
  }

  return NULL;
}
/* Look for a ".gnu_debuglink" section.  If one exists, try to open a
   suitable debuginfo file: first next to the object, then in a ".debug"
   subdirectory, finally under debug_file_directory + the full path.
   Returns an open fd whose CRC matched (caller closes it), or -1. */
static int open_file_from_debug_link(const char *name,
                                     int fd,
                                     ELF_EHDR *ehdr,
                                     ELF_SHDR *shbuf,
                                     struct elf_section *scn_cache)
{
  int debug_fd;
  struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
                                                        shbuf, scn_cache);
  if (debug_link == NULL)
    return -1;

  /* The section holds the NUL-terminated debuginfo file name, padded to
     a 4-byte boundary, followed by a 32-bit CRC of that file. */
  char *debug_filename = debug_link->c_data;
  int offset = (strlen(debug_filename) + 4) >> 2;
  // BUG FIX: `crc` was needlessly `static`, which is not reentrant; a
  // plain local behaves identically here.
  unsigned int crc = ((unsigned int*)debug_link->c_data)[offset];

  /* Big enough for the longest of the three candidate paths built below. */
  char *debug_pathname = malloc(strlen(debug_filename)
                                + strlen(name)
                                + strlen(".debug/")
                                + strlen(debug_file_directory)
                                + 2);
  // BUG FIX: the original never checked malloc and would crash in strcpy.
  if (debug_pathname == NULL)
    return -1;

  strcpy(debug_pathname, name);
  char *last_slash = strrchr(debug_pathname, '/');
  if (last_slash == NULL) {
    // BUG FIX: the original returned here without freeing debug_pathname.
    free(debug_pathname);
    return -1;
  }

  /* Look in the same directory as the object. */
  strcpy(last_slash+1, debug_filename);
  debug_fd = open_debug_file(debug_pathname, crc);
  if (debug_fd >= 0) {
    free(debug_pathname);
    return debug_fd;
  }

  /* Look in a subdirectory named ".debug". */
  strcpy(last_slash+1, ".debug/");
  strcat(last_slash, debug_filename);
  debug_fd = open_debug_file(debug_pathname, crc);
  if (debug_fd >= 0) {
    free(debug_pathname);
    return debug_fd;
  }

  /* Look in /usr/lib/debug + the full pathname. */
  strcpy(debug_pathname, debug_file_directory);
  strcat(debug_pathname, name);
  last_slash = strrchr(debug_pathname, '/');
  strcpy(last_slash+1, debug_filename);
  debug_fd = open_debug_file(debug_pathname, crc);
  if (debug_fd >= 0) {
    free(debug_pathname);
    return debug_fd;
  }

  free(debug_pathname);
  return -1;
}
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo);
/* Look for a ".gnu_debuglink" section.  If one exists, try to open a
   suitable debuginfo file and read a symbol table from it.  Returns the
   new symtab or NULL; the caller's fd is left untouched. */
static struct symtab *build_symtab_from_debug_link(const char *name,
                                                   int fd,
                                                   ELF_EHDR *ehdr,
                                                   ELF_SHDR *shbuf,
                                                   struct elf_section *scn_cache)
{
  struct symtab *result;
  int debug_fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);

  if (debug_fd < 0)
    return NULL;

  /* Don't recurse into the debuginfo file's own debug links. */
  result = build_symtab_internal(debug_fd, NULL, /* try_debuginfo */ false);
  close(debug_fd);
  return result;
}
// Given a build_id, return a freshly malloc'd path to the associated
// debuginfo file: <debug_file_directory>/.build-id/xx/yyyy....debug,
// where xx is the first id byte in hex and yyyy... the remaining bytes.
// Returns NULL on allocation failure; the caller must free the result.
static char *
build_id_to_debug_filename (size_t size, unsigned char *data)
{
  char *filename, *s;

  filename = malloc(strlen (debug_file_directory) + (sizeof "/.build-id/" - 1) + 1
                    + 2 * size + (sizeof ".debug" - 1) + 1);
  // BUG FIX: the original dereferenced a failed malloc in sprintf below.
  if (filename == NULL)
    return NULL;

  s = filename + sprintf (filename, "%s/.build-id/", debug_file_directory);
  if (size > 0)
    {
      size--;
      s += sprintf (s, "%02x", *data++);
    }
  if (size > 0)
    *s++ = '/';
  while (size-- > 0)
    s += sprintf (s, "%02x", *data++);
  strcpy (s, ".debug");

  return filename;
}
// Read a GNU build-ID note.  Try to open the associated debuginfo file
// and return a symtab read from it, or NULL on any failure.
static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)
{
  int fd;
  struct symtab *symtab = NULL;

  // The ID bytes follow the note header and its name field.
  // NOTE(review): assumes n_namesz needs no alignment padding ("GNU\0"
  // is already 4-byte aligned) and that the Elf64 note layout is valid
  // for the objects seen here -- confirm for 32-bit targets.
  unsigned char *bytes
    = (unsigned char*)(note+1) + note->n_namesz;
  // BUG FIX: the path is ordinary text, so keep it as char* rather than
  // the original unsigned char*, which mismatched pathmap_open/free.
  char *filename = build_id_to_debug_filename (note->n_descsz, bytes);
  if (filename == NULL)
    return NULL;

  fd = pathmap_open(filename);
  if (fd >= 0) {
    // Don't recurse into the debuginfo file's own debug links.
    symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
    close(fd);
  }
  free(filename);
  return symtab;
}
// read symbol table from given fd. If try_debuginfo) is true, also
// try to open an associated debuginfo file
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo) {
ELF_EHDR ehdr; ELF_EHDR ehdr;
char *names = NULL; char *names = NULL;
struct symtab* symtab = NULL; struct symtab* symtab = NULL;
@ -66,6 +332,7 @@ struct symtab* build_symtab(int fd) {
ELF_SHDR* cursct = NULL; ELF_SHDR* cursct = NULL;
ELF_PHDR* phbuf = NULL; ELF_PHDR* phbuf = NULL;
ELF_PHDR* phdr = NULL; ELF_PHDR* phdr = NULL;
int sym_section = SHT_DYNSYM;
uintptr_t baseaddr = (uintptr_t)-1; uintptr_t baseaddr = (uintptr_t)-1;
@ -90,18 +357,23 @@ struct symtab* build_symtab(int fd) {
for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) { for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) {
scn_cache[cnt].c_shdr = cursct; scn_cache[cnt].c_shdr = cursct;
if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB) { if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB
|| cursct->sh_type == SHT_NOTE || cursct->sh_type == SHT_DYNSYM) {
if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) { if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) {
goto quit; goto quit;
} }
} }
if (cursct->sh_type == SHT_SYMTAB) {
// Full symbol table available so use that
sym_section = cursct->sh_type;
}
cursct++; cursct++;
} }
for (cnt = 1; cnt < ehdr.e_shnum; cnt++) { for (cnt = 1; cnt < ehdr.e_shnum; cnt++) {
ELF_SHDR *shdr = scn_cache[cnt].c_shdr; ELF_SHDR *shdr = scn_cache[cnt].c_shdr;
if (shdr->sh_type == SHT_SYMTAB) { if (shdr->sh_type == sym_section) {
ELF_SYM *syms; ELF_SYM *syms;
int j, n, rslt; int j, n, rslt;
size_t size; size_t size;
@ -163,6 +435,45 @@ struct symtab* build_symtab(int fd) {
} }
} }
// Look for a separate debuginfo file.
if (try_debuginfo) {
// We prefer a debug symtab to an object's own symtab, so look in
// the debuginfo file. We stash a copy of the old symtab in case
// there is no debuginfo.
struct symtab* prev_symtab = symtab;
symtab = NULL;
#ifdef NT_GNU_BUILD_ID
// First we look for a Build ID
for (cursct = shbuf, cnt = 0;
symtab == NULL && cnt < ehdr.e_shnum;
cnt++) {
if (cursct->sh_type == SHT_NOTE) {
Elf64_Nhdr *note = (Elf64_Nhdr *)scn_cache[cnt].c_data;
if (note->n_type == NT_GNU_BUILD_ID) {
symtab = build_symtab_from_build_id(note);
}
}
cursct++;
}
#endif
// Then, if that doesn't work, the debug link
if (symtab == NULL) {
symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
scn_cache);
}
// If we still haven't found a symtab, use the object's own symtab.
if (symtab != NULL) {
if (prev_symtab != NULL)
destroy_symtab(prev_symtab);
} else {
symtab = prev_symtab;
}
}
quit: quit:
if (shbuf) free(shbuf); if (shbuf) free(shbuf);
if (phbuf) free(phbuf); if (phbuf) free(phbuf);
@ -177,6 +488,11 @@ quit:
return symtab; return symtab;
} }
// Public entry point: read the symbol table from the given ELF fd.
// `filename` is the object's path, used to locate separate debuginfo
// (build-id or .gnu_debuglink), which is preferred over the object's
// own symtab when found.  The caller retains ownership of fd.
struct symtab* build_symtab(int fd, const char *filename) {
  return build_symtab_internal(fd, filename, /* try_debuginfo */ true);
}
void destroy_symtab(struct symtab* symtab) { void destroy_symtab(struct symtab* symtab) {
if (!symtab) return; if (!symtab) return;
if (symtab->strs) free(symtab->strs); if (symtab->strs) free(symtab->strs);

View file

@ -32,7 +32,7 @@
struct symtab; struct symtab;
// build symbol table for a given ELF file descriptor // build symbol table for a given ELF file descriptor
struct symtab* build_symtab(int fd); struct symtab* build_symtab(int fd, const char *filename);
// destroy the symbol table // destroy the symbol table
void destroy_symtab(struct symtab* symtab); void destroy_symtab(struct symtab* symtab);

View file

@ -0,0 +1,5 @@
#!/bin/sh
nm --defined-only $* | awk '
{ if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
'

View file

@ -290,6 +290,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support. # This is for Forte Analyzer profiling support.
AsyncGetCallTrace; AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local: local:
*; *;
}; };

View file

@ -285,6 +285,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support. # This is for Forte Analyzer profiling support.
AsyncGetCallTrace; AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local: local:
*; *;
}; };

View file

@ -121,14 +121,21 @@ JVM_OBJ_FILES = $(Obj_Files)
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
mapfile : $(MAPFILE) mapfile : $(MAPFILE) vm.def
rm -f $@ rm -f $@
cat $^ > $@ awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
{ system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
mapfile_reorder : mapfile $(REORDERFILE) mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@ rm -f $@
cat $^ > $@ cat $^ > $@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
ifeq ($(ZERO_LIBARCH), ppc64) ifeq ($(ZERO_LIBARCH), ppc64)
STATIC_CXX = false STATIC_CXX = false
else else

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -377,6 +377,16 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
} }
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
__ delayed()->nop();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) { void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native----------------- //---------------slow case: call to native-----------------
__ bind(_entry); __ bind(_entry);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -143,3 +143,6 @@
static bool is_caller_save_register (LIR_Opr reg); static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r); static bool is_caller_save_register (Register r);
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }

View file

@ -378,12 +378,7 @@ int LIR_Assembler::emit_exception_handler() {
int offset = code_offset(); int offset = code_offset();
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) { __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop(); __ delayed()->nop();
debug_only(__ stop("should have gone to the caller");) debug_only(__ stop("should have gone to the caller");)
assert(code_offset() - offset <= exception_handler_size, "overflow"); assert(code_offset() - offset <= exception_handler_size, "overflow");
@ -685,29 +680,29 @@ void LIR_Assembler::align_call(LIR_Code) {
} }
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(entry, rtype); __ call(op->addr(), rtype);
// the peephole pass fills the delay slot // the peephole pass fills the delay slot
} }
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rspec = virtual_call_Relocation::spec(pc()); RelocationHolder rspec = virtual_call_Relocation::spec(pc());
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg); __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec); __ relocate(rspec);
__ call(entry, relocInfo::none); __ call(op->addr(), relocInfo::none);
// the peephole pass fills the delay slot // the peephole pass fills the delay slot
} }
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
add_debug_info_for_null_check_here(info); add_debug_info_for_null_check_here(op->info());
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
if (__ is_simm13(vtable_offset) ) { if (__ is_simm13(op->vtable_offset())) {
__ ld_ptr(G3_scratch, vtable_offset, G5_method); __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
} else { } else {
// This will generate 2 instructions // This will generate 2 instructions
__ set(vtable_offset, G5_method); __ set(op->vtable_offset(), G5_method);
// ld_ptr, set_hi, set // ld_ptr, set_hi, set
__ ld_ptr(G3_scratch, G5_method, G5_method); __ ld_ptr(G3_scratch, G5_method, G5_method);
} }
@ -717,6 +712,16 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
// load with 32-bit displacement // load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) { int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset(); int load_offset = code_offset();
@ -1067,7 +1072,8 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
LIR_Const* c = src->as_constant_ptr(); LIR_Const* c = src->as_constant_ptr();
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_FLOAT: { case T_FLOAT:
case T_ADDRESS: {
Register src_reg = O7; Register src_reg = O7;
int value = c->as_jint_bits(); int value = c->as_jint_bits();
if (value == 0) { if (value == 0) {
@ -1123,7 +1129,8 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
} }
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_FLOAT: { case T_FLOAT:
case T_ADDRESS: {
LIR_Opr tmp = FrameMap::O7_opr; LIR_Opr tmp = FrameMap::O7_opr;
int value = c->as_jint_bits(); int value = c->as_jint_bits();
if (value == 0) { if (value == 0) {
@ -1195,6 +1202,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_ADDRESS:
{ {
jint con = c->as_jint(); jint con = c->as_jint();
if (to_reg->is_single_cpu()) { if (to_reg->is_single_cpu()) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,17 +42,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
} }
void C1_MacroAssembler::method_exit(bool restore_frame) {
// this code must be structured this way so that the return
// instruction can be a safepoint.
if (restore_frame) {
restore();
}
retl();
delayed()->nop();
}
void C1_MacroAssembler::explicit_null_check(Register base) { void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented(); Unimplemented();
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -677,7 +677,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save()); G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save()); __ verify_not_null_oop(Oexception->after_save());
__ jmp(O0, 0); __ jmp(O0, 0);
__ delayed()->restore(); __ delayed()->restore();
@ -985,7 +985,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) { void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
Label no_deopt; Label no_deopt;
Label no_handler;
__ verify_not_null_oop(Oexception); __ verify_not_null_oop(Oexception);
@ -1003,9 +1002,14 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
// whether it had a handler or not we will deoptimize // whether it had a handler or not we will deoptimize
// by entering the deopt blob with a pending exception. // by entering the deopt blob with a pending exception.
#ifdef ASSERT
Label done;
__ tst(O0); __ tst(O0);
__ br(Assembler::zero, false, Assembler::pn, no_handler); __ br(Assembler::notZero, false, Assembler::pn, done);
__ delayed()->nop(); __ delayed()->nop();
__ stop("should have found address");
__ bind(done);
#endif
// restore the registers that were saved at the beginning and jump to the exception handler. // restore the registers that were saved at the beginning and jump to the exception handler.
restore_live_registers(sasm); restore_live_registers(sasm);
@ -1013,20 +1017,6 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
__ jmp(O0, 0); __ jmp(O0, 0);
__ delayed()->restore(); __ delayed()->restore();
__ bind(no_handler);
__ mov(L0, I7); // restore return address
// restore exception oop
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ restore();
AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, G4);
__ delayed()->nop();
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
} }

View file

@ -244,9 +244,10 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
} }
void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) { void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
mov(arg_1, O0); mov(arg_1, O0);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 1); mov(arg_2, O1);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
} }
#endif /* CC_INTERP */ #endif /* CC_INTERP */

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -121,7 +121,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception = true); bool check_exception = true);
#ifndef CC_INTERP #ifndef CC_INTERP
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1); void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is // Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3. // a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.

View file

@ -379,7 +379,7 @@ class StubGenerator: public StubCodeGenerator {
__ save_frame(0); // compensates for compiler weakness __ save_frame(0); // compensates for compiler weakness
__ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
BLOCK_COMMENT("call exception_handler_for_return_address"); BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch); __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
__ mov(O0, handler_reg); __ mov(O0, handler_reg);
__ restore(); // compensates for compiler weakness __ restore(); // compensates for compiler weakness

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,13 @@ static bool returns_to_call_stub(address return_pc) {
enum /* platform_dependent_constants */ { enum /* platform_dependent_constants */ {
// %%%%%%%% May be able to shrink this a lot // %%%%%%%% May be able to shrink this a lot
code_size1 = 20000, // simply increase if too small (assembler will crash if too small) code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 20000 // simply increase if too small (assembler will crash if too small) code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
}; };
class Sparc { class Sparc {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1822,7 +1822,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
__ super_call_VM_leaf(L7_thread_cache, __ super_call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save()); G2_thread, Oissuing_pc->after_save());
// The caller's SP was adjusted upon method entry to accomodate // The caller's SP was adjusted upon method entry to accomodate
// the callee's non-argument locals. Undo that adjustment. // the callee's non-argument locals. Undo that adjustment.

View file

@ -8460,6 +8460,7 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
subptr(str1, result); // Restore counter subptr(str1, result); // Restore counter
shrl(str1, 1); shrl(str1, 1);
addl(cnt1, str1); addl(cnt1, str1);
decrementl(cnt1);
lea(str1, Address(result, 2)); // Reload string lea(str1, Address(result, 2)); // Reload string
// Load substr // Load substr

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -373,6 +373,14 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
} }
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry); __ bind(_entry);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -126,3 +126,6 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds"); assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i]; return _caller_save_xmm_regs[i];
} }
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }

View file

@ -436,40 +436,18 @@ int LIR_Assembler::emit_exception_handler() {
int offset = code_offset(); int offset = code_offset();
// if the method does not have an exception handler, then there is // the exception oop and pc are in rax, and rdx
// no reason to search for one
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(rax);
// search an exception handler (rax: exception oop, rdx: throwing pc)
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
// if the call returns here, then the exception handler for particular
// exception doesn't exist -> unwind activation and forward exception to caller
}
// the exception oop is in rax,
// no other registers need to be preserved, so invalidate them // no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, true, true, true); __ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception // check that there is really an exception
__ verify_not_null_oop(rax); __ verify_not_null_oop(rax);
// unlock the receiver/klass if necessary // search an exception handler (rax: exception oop, rdx: throwing pc)
// rax,: exception __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
ciMethod* method = compilation()->method();
if (method->is_synchronized() && GenerateSynchronizationCode) { __ stop("should not reach here");
monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
}
// unwind activation and forward exception to caller
// rax,: exception
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
assert(code_offset() - offset <= exception_handler_size, "overflow"); assert(code_offset() - offset <= exception_handler_size, "overflow");
__ end_a_stub(); __ end_a_stub();
@ -495,8 +473,10 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset(); int offset = code_offset();
InternalAddress here(__ pc()); InternalAddress here(__ pc());
__ pushptr(here.addr()); __ pushptr(here.addr());
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(code_offset() - offset <= deopt_handler_size, "overflow"); assert(code_offset() - offset <= deopt_handler_size, "overflow");
__ end_a_stub(); __ end_a_stub();
@ -593,7 +573,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
} }
// Pop the stack before the safepoint code // Pop the stack before the safepoint code
__ leave(); __ remove_frame(initial_frame_size_in_bytes());
bool result_is_oop = result->is_valid() ? result->is_oop() : false; bool result_is_oop = result->is_valid() ? result->is_oop() : false;
@ -648,7 +628,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
LIR_Const* c = src->as_constant_ptr(); LIR_Const* c = src->as_constant_ptr();
switch (c->type()) { switch (c->type()) {
case T_INT: { case T_INT:
case T_ADDRESS: {
assert(patch_code == lir_patch_none, "no patching handled here"); assert(patch_code == lir_patch_none, "no patching handled here");
__ movl(dest->as_register(), c->as_jint()); __ movl(dest->as_register(), c->as_jint());
break; break;
@ -731,6 +712,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
switch (c->type()) { switch (c->type()) {
case T_INT: // fall through case T_INT: // fall through
case T_FLOAT: case T_FLOAT:
case T_ADDRESS:
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
break; break;
@ -766,6 +748,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
switch (type) { switch (type) {
case T_INT: // fall through case T_INT: // fall through
case T_FLOAT: case T_FLOAT:
case T_ADDRESS:
__ movl(as_Address(addr), c->as_jint_bits()); __ movl(as_Address(addr), c->as_jint_bits());
break; break;
@ -2738,6 +2721,7 @@ void LIR_Assembler::align_call(LIR_Code code) {
switch (code) { switch (code) {
case lir_static_call: case lir_static_call:
case lir_optvirtual_call: case lir_optvirtual_call:
case lir_dynamic_call:
offset += NativeCall::displacement_offset; offset += NativeCall::displacement_offset;
break; break;
case lir_icvirtual_call: case lir_icvirtual_call:
@ -2753,30 +2737,41 @@ void LIR_Assembler::align_call(LIR_Code code) {
} }
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(entry, rtype)); __ call(AddressLiteral(op->addr(), rtype));
add_call_info(code_offset(), info); add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
} }
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rh = virtual_call_Relocation::spec(pc()); RelocationHolder rh = virtual_call_Relocation::spec(pc());
__ movoop(IC_Klass, (jobject)Universe::non_oop_word()); __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
assert(!os::is_MP() || assert(!os::is_MP() ||
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(entry, rh)); __ call(AddressLiteral(op->addr(), rh));
add_call_info(code_offset(), info); add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
} }
/* Currently, vtable-dispatch is only enabled for sparc platforms */ /* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
ShouldNotReachHere(); ShouldNotReachHere();
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
__ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
__ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
}
void LIR_Assembler::emit_static_call_stub() { void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc(); address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size); address stub = __ start_a_stub(call_stub_size);
@ -2829,10 +2824,12 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
} else { } else {
unwind_id = Runtime1::handle_exception_nofpu_id; unwind_id = Runtime1::handle_exception_nofpu_id;
} }
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
} else { } else {
unwind_id = Runtime1::unwind_exception_id; // remove the activation
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
} }
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
// enough room for two byte trap // enough room for two byte trap
__ nop(); __ nop();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -317,14 +317,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
} }
void C1_MacroAssembler::method_exit(bool restore_frame) {
if (restore_frame) {
leave();
}
ret(0);
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes) { void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// Make sure there is enough stack space for this method's activation. // Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter(). This matches the // Note that we do this before doing an enter(). This matches the
@ -333,7 +325,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// between the two compilers. // between the two compilers.
generate_stack_overflow_check(frame_size_in_bytes); generate_stack_overflow_check(frame_size_in_bytes);
enter(); push(rbp);
#ifdef TIERED #ifdef TIERED
// c2 leaves fpu stack dirty. Clean it on entry // c2 leaves fpu stack dirty. Clean it on entry
if (UseSSE < 2 ) { if (UseSSE < 2 ) {
@ -344,6 +336,12 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
} }
void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
pop(rbp);
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) { void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
if (C1Breakpoint) int3(); if (C1Breakpoint) int3();
inline_cache_check(receiver, ic_klass); inline_cache_check(receiver, ic_klass);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -688,18 +688,21 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
// rax,: handler address or NULL if no handler exists // rax,: handler address
// will be the deopt blob if nmethod was deoptimized while we looked up // will be the deopt blob if nmethod was deoptimized while we looked up
// handler regardless of whether handler existed in the nmethod. // handler regardless of whether handler existed in the nmethod.
// only rax, is valid at this time, all other registers have been destroyed by the runtime call // only rax, is valid at this time, all other registers have been destroyed by the runtime call
__ invalidate_registers(false, true, true, true, true, true); __ invalidate_registers(false, true, true, true, true, true);
#ifdef ASSERT
// Do we have an exception handler in the nmethod? // Do we have an exception handler in the nmethod?
Label no_handler;
Label done; Label done;
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, no_handler); __ jcc(Assembler::notZero, done);
__ stop("no handler found");
__ bind(done);
#endif
// exception handler found // exception handler found
// patch the return address -> the stub will directly return to the exception handler // patch the return address -> the stub will directly return to the exception handler
@ -712,36 +715,14 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
__ leave(); __ leave();
__ ret(0); __ ret(0);
__ bind(no_handler);
// no exception handler found in this method, so the exception is
// forwarded to the caller (using the unwind code of the nmethod)
// there is no need to restore the registers
// restore the real return address that was saved before the RT-call
__ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
__ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
// load address of JavaThread object for thread-local data
NOT_LP64(__ get_thread(thread);)
// restore exception oop into rax, (convention for unwind code)
__ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
// clear exception fields in JavaThread because they are no longer needed
// (fields must be cleared because they are processed by GC otherwise)
__ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
// pop the stub frame off
__ leave();
generate_unwind_exception(sasm);
__ stop("should not reach here");
} }
void Runtime1::generate_unwind_exception(StubAssembler *sasm) { void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// incoming parameters // incoming parameters
const Register exception_oop = rax; const Register exception_oop = rax;
// callee-saved copy of exception_oop during runtime call
const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
// other registers used in this stub // other registers used in this stub
const Register exception_pc = rdx; const Register exception_pc = rdx;
const Register handler_addr = rbx; const Register handler_addr = rbx;
@ -769,38 +750,39 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// clear the FPU stack in case any FPU results are left behind // clear the FPU stack in case any FPU results are left behind
__ empty_FPU_stack(); __ empty_FPU_stack();
// leave activation of nmethod // save exception_oop in callee-saved register to preserve it during runtime calls
__ leave(); __ verify_not_null_oop(exception_oop);
// store return address (is on top of stack after leave) __ movptr(exception_oop_callee_saved, exception_oop);
NOT_LP64(__ get_thread(thread);)
// Get return address (is on top of stack after leave).
__ movptr(exception_pc, Address(rsp, 0)); __ movptr(exception_pc, Address(rsp, 0));
__ verify_oop(exception_oop);
// save exception oop from rax, to stack before call
__ push(exception_oop);
// search the exception handler address of the caller (using the return address) // search the exception handler address of the caller (using the return address)
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
// rax,: exception handler address of the caller // rax: exception handler address of the caller
// only rax, is valid at this time, all other registers have been destroyed by the call // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
__ invalidate_registers(false, true, true, true, true, true); __ invalidate_registers(false, true, true, true, false, true);
// move result of call into correct register // move result of call into correct register
__ movptr(handler_addr, rax); __ movptr(handler_addr, rax);
// restore exception oop in rax, (required convention of exception handler) // Restore exception oop to RAX (required convention of exception handler).
__ pop(exception_oop); __ movptr(exception_oop, exception_oop_callee_saved);
__ verify_oop(exception_oop); // verify that there is really a valid exception in rax
__ verify_not_null_oop(exception_oop);
// get throwing pc (= return address). // get throwing pc (= return address).
// rdx has been destroyed by the call, so it must be set again // rdx has been destroyed by the call, so it must be set again
// the pop is also necessary to simulate the effect of a ret(0) // the pop is also necessary to simulate the effect of a ret(0)
__ pop(exception_pc); __ pop(exception_pc);
// verify that that there is really a valid exception in rax, // Restore SP from BP if the exception PC is a MethodHandle call site.
__ verify_not_null_oop(exception_oop); NOT_LP64(__ get_thread(thread);)
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// continue at exception handler (return address removed) // continue at exception handler (return address removed)
// note: do *not* remove arguments when unwinding the // note: do *not* remove arguments when unwinding the
@ -808,9 +790,9 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// all arguments on the stack when entering the // all arguments on the stack when entering the
// runtime to determine the exception handler // runtime to determine the exception handler
// (GC happens at call site with arguments!) // (GC happens at call site with arguments!)
// rax,: exception oop // rax: exception oop
// rdx: throwing pc // rdx: throwing pc
// rbx,: exception handler // rbx: exception handler
__ jmp(handler_addr); __ jmp(handler_addr);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -60,13 +60,13 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
} }
#ifdef ASSERT #ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register rax_argslot, static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
const char* error_message) { const char* error_message) {
// Verify that argslot lies within (rsp, rbp]. // Verify that argslot lies within (rsp, rbp].
Label L_ok, L_bad; Label L_ok, L_bad;
__ cmpptr(rax_argslot, rbp); __ cmpptr(argslot_reg, rbp);
__ jccb(Assembler::above, L_bad); __ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot); __ cmpptr(rsp, argslot_reg);
__ jccb(Assembler::below, L_ok); __ jccb(Assembler::below, L_ok);
__ bind(L_bad); __ bind(L_bad);
__ stop(error_message); __ stop(error_message);
@ -178,22 +178,6 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
// Now move the argslot down, to point to the opened-up space. // Now move the argslot down, to point to the opened-up space.
__ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr)); __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
// The caller has specified a bitmask of tags to put into the opened space.
// This only works when the arg_slots value is an assembly-time constant.
int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
for (int slot = 0; slot < constant_arg_slots; slot++) {
BasicType slot_type = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
int slot_offset = Interpreter::stackElementSize() * slot;
Address tag_addr(rax_argslot, slot_offset + tag_offset);
__ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
}
// Note that the new argument slots are tagged properly but contain
// garbage at this point. The value portions must be initialized
// by the caller. (Especially references!)
}
} }
// Helper to remove argument slots from the stack. // Helper to remove argument slots from the stack.
@ -206,18 +190,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
(!arg_slots.is_register() ? rsp : arg_slots.as_register())); (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
#ifdef ASSERT #ifdef ASSERT
{ // Verify that [argslot..argslot+size) lies within (rsp, rbp).
// Verify that [argslot..argslot+size) lies within (rsp, rbp). __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
Label L_ok, L_bad; verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
__ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
__ cmpptr(rbx_temp, rbp);
__ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
__ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop("deleted argument(s) must fall within current frame");
__ bind(L_ok);
}
if (arg_slots.is_register()) { if (arg_slots.is_register()) {
Label L_ok, L_bad; Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD); __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
@ -321,12 +296,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() ); Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot) Address vmarg; // __ argument_address(vmargslot)
int tag_offset = -1;
if (TaggedStackInterpreter) {
tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
assert(tag_offset = wordSize, "stack grows as expected");
}
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
if (have_entry(ek)) { if (have_entry(ek)) {
@ -372,11 +341,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ mov(rsp, rsi); // cut the stack back to where the caller started __ mov(rsp, rsi); // cut the stack back to where the caller started
// Repush the arguments as if coming from the interpreter. // Repush the arguments as if coming from the interpreter.
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_INT));
__ push(rdx_code); __ push(rdx_code);
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
__ push(rcx_fail); __ push(rcx_fail);
if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
__ push(rax_want); __ push(rax_want);
Register rbx_method = rbx_temp; Register rbx_method = rbx_temp;
@ -397,7 +363,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Do something that is at least causes a valid throw from the interpreter. // Do something that is at least causes a valid throw from the interpreter.
__ bind(no_method); __ bind(no_method);
__ pop(rax_want); __ pop(rax_want);
if (TaggedStackInterpreter) __ pop(rcx_fail);
__ pop(rcx_fail); __ pop(rcx_fail);
__ push(rax_want); __ push(rax_want);
__ push(rcx_fail); __ push(rcx_fail);
@ -510,18 +475,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _bound_long_direct_mh: case _bound_long_direct_mh:
{ {
bool direct_to_method = (ek >= _bound_ref_direct_mh); bool direct_to_method = (ek >= _bound_ref_direct_mh);
BasicType arg_type = T_ILLEGAL; BasicType arg_type = T_ILLEGAL;
if (ek == _bound_long_mh || ek == _bound_long_direct_mh) { int arg_mask = _INSERT_NO_MASK;
arg_type = T_LONG; int arg_slots = -1;
} else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) { get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
arg_type = T_INT;
} else {
assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
arg_type = T_OBJECT;
}
int arg_slots = type2size[arg_type];
int arg_mask = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
arg_slots == 1 ? _INSERT_INT_MASK : _INSERT_LONG_MASK);
// make room for the new argument: // make room for the new argument:
__ movl(rax_argslot, rcx_bmh_vmargslot); __ movl(rax_argslot, rcx_bmh_vmargslot);
@ -660,13 +617,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
} }
break; break;
default: default:
assert(false, ""); ShouldNotReachHere();
} }
goto finish_int_conversion;
}
finish_int_conversion: // Do the requested conversion and store the value.
{
Register rbx_vminfo = rbx_temp; Register rbx_vminfo = rbx_temp;
__ movl(rbx_vminfo, rcx_amh_conversion); __ movl(rbx_vminfo, rcx_amh_conversion);
assert(CONV_VMINFO_SHIFT == 0, "preshifted"); assert(CONV_VMINFO_SHIFT == 0, "preshifted");
@ -692,7 +646,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ shrl(rdx_temp /*, rcx*/); __ shrl(rdx_temp /*, rcx*/);
__ bind(done); __ bind(done);
__ movl(vmarg, rdx_temp); __ movl(vmarg, rdx_temp); // Store the value.
__ xchgptr(rcx, rbx_vminfo); // restore rcx_recv __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
__ jump_to_method_handle_entry(rcx_recv, rdx_temp); __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
@ -715,9 +669,14 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
switch (ek) { switch (ek) {
case _adapter_opt_i2l: case _adapter_opt_i2l:
{ {
#ifdef _LP64
__ movslq(rdx_temp, vmarg1); // Load sign-extended
__ movq(vmarg1, rdx_temp); // Store into first slot
#else
__ movl(rdx_temp, vmarg1); __ movl(rdx_temp, vmarg1);
__ sarl(rdx_temp, 31); // __ extend_sign() __ sarl(rdx_temp, BitsPerInt - 1); // __ extend_sign()
__ movl(vmarg2, rdx_temp); // store second word __ movl(vmarg2, rdx_temp); // store second word
#endif
} }
break; break;
case _adapter_opt_unboxl: case _adapter_opt_unboxl:
@ -727,14 +686,19 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG); int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), ""); assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
__ null_check(rdx_temp, value_offset); __ null_check(rdx_temp, value_offset);
#ifdef _LP64
__ movq(rbx_temp, Address(rdx_temp, value_offset));
__ movq(vmarg1, rbx_temp);
#else
__ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt)); __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
__ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt)); __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
__ movl(vmarg1, rbx_temp); __ movl(vmarg1, rbx_temp);
__ movl(vmarg2, rdx_temp); __ movl(vmarg2, rdx_temp);
#endif
} }
break; break;
default: default:
assert(false, ""); ShouldNotReachHere();
} }
__ movptr(rcx_recv, rcx_mh_vmtarget); __ movptr(rcx_recv, rcx_mh_vmtarget);
@ -768,19 +732,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (ek == _adapter_opt_f2d) { if (ek == _adapter_opt_f2d) {
__ fld_s(vmarg); // load float to ST0 __ fld_s(vmarg); // load float to ST0
__ fstp_s(vmarg); // store single __ fstp_s(vmarg); // store single
} else if (!TaggedStackInterpreter) {
__ fld_d(vmarg); // load double to ST0
__ fstp_s(vmarg); // store single
} else { } else {
Address vmarg_tag = vmarg.plus_disp(tag_offset);
Address vmarg2 = vmarg.plus_disp(Interpreter::stackElementSize());
// vmarg2_tag does not participate in this code
Register rbx_tag = rbx_temp;
__ movl(rbx_tag, vmarg_tag); // preserve tag
__ movl(rdx_temp, vmarg2); // get second word of double
__ movl(vmarg_tag, rdx_temp); // align with first word
__ fld_d(vmarg); // load double to ST0 __ fld_d(vmarg); // load double to ST0
__ movl(vmarg_tag, rbx_tag); // restore tag
__ fstp_s(vmarg); // store single __ fstp_s(vmarg); // store single
} }
#endif //_LP64 #endif //_LP64
@ -812,19 +765,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _adapter_opt_rot_2_up: case _adapter_opt_rot_2_up:
case _adapter_opt_rot_2_down: case _adapter_opt_rot_2_down:
{ {
int rotate = 0, swap_slots = 0; int swap_bytes = 0, rotate = 0;
switch ((int)ek) { get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
case _adapter_opt_swap_1: swap_slots = 1; break;
case _adapter_opt_swap_2: swap_slots = 2; break;
case _adapter_opt_rot_1_up: swap_slots = 1; rotate++; break;
case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
case _adapter_opt_rot_2_up: swap_slots = 2; rotate++; break;
case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
default: assert(false, "");
}
// the real size of the move must be doubled if TaggedStackInterpreter:
int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
// 'argslot' is the position of the first argument to swap // 'argslot' is the position of the first argument to swap
__ movl(rax_argslot, rcx_amh_vmargslot); __ movl(rax_argslot, rcx_amh_vmargslot);
@ -925,8 +867,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// 'stack_move' is negative number of words to duplicate // 'stack_move' is negative number of words to duplicate
Register rdx_stack_move = rdx_temp; Register rdx_stack_move = rdx_temp;
__ movl(rdx_stack_move, rcx_amh_conversion); __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
__ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT); __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
int argslot0_num = 0; int argslot0_num = 0;
Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num)); Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
@ -988,8 +930,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// 'stack_move' is number of words to drop // 'stack_move' is number of words to drop
Register rdi_stack_move = rdi; Register rdi_stack_move = rdi;
__ movl(rdi_stack_move, rcx_amh_conversion); __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
__ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
remove_arg_slots(_masm, rdi_stack_move, remove_arg_slots(_masm, rdi_stack_move,
rax_argslot, rbx_temp, rdx_temp); rax_argslot, rbx_temp, rdx_temp);
@ -1014,11 +956,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _adapter_opt_spread_more: case _adapter_opt_spread_more:
{ {
// spread an array out into a group of arguments // spread an array out into a group of arguments
int length_constant = -1; int length_constant = get_ek_adapter_opt_spread_info(ek);
switch (ek) {
case _adapter_opt_spread_0: length_constant = 0; break;
case _adapter_opt_spread_1: length_constant = 1; break;
}
// find the address of the array argument // find the address of the array argument
__ movl(rax_argslot, rcx_amh_vmargslot); __ movl(rax_argslot, rcx_amh_vmargslot);
@ -1079,8 +1017,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize())); __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
// 'stack_move' is negative number of words to insert // 'stack_move' is negative number of words to insert
Register rdi_stack_move = rdi; Register rdi_stack_move = rdi;
__ movl(rdi_stack_move, rcx_amh_conversion); __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
__ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT); __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
Register rsi_temp = rsi_array; // spill this Register rsi_temp = rsi_array; // spill this
insert_arg_slots(_masm, rdi_stack_move, -1, insert_arg_slots(_masm, rdi_stack_move, -1,
rax_argslot, rbx_temp, rsi_temp); rax_argslot, rbx_temp, rsi_temp);
@ -1114,10 +1052,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_source, 0)); __ movptr(rbx_temp, Address(rsi_source, 0));
__ movptr(Address(rax_argslot, 0), rbx_temp); __ movptr(Address(rax_argslot, 0), rbx_temp);
__ addptr(rsi_source, type2aelembytes(elem_type)); __ addptr(rsi_source, type2aelembytes(elem_type));
if (TaggedStackInterpreter) {
__ movptr(Address(rax_argslot, tag_offset),
frame::tag_for_basic_type(elem_type));
}
__ addptr(rax_argslot, Interpreter::stackElementSize()); __ addptr(rax_argslot, Interpreter::stackElementSize());
__ cmpptr(rax_argslot, rdx_argslot_limit); __ cmpptr(rax_argslot, rdx_argslot_limit);
__ jccb(Assembler::less, loop); __ jccb(Assembler::less, loop);
@ -1131,11 +1065,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(rbx_temp, Address(rsi_array, elem_offset)); __ movptr(rbx_temp, Address(rsi_array, elem_offset));
__ movptr(Address(rax_argslot, slot_offset), rbx_temp); __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
elem_offset += type2aelembytes(elem_type); elem_offset += type2aelembytes(elem_type);
if (TaggedStackInterpreter) { slot_offset += Interpreter::stackElementSize();
__ movptr(Address(rax_argslot, slot_offset + tag_offset),
frame::tag_for_basic_type(elem_type));
}
slot_offset += Interpreter::stackElementSize();
} }
} }

View file

@ -369,7 +369,7 @@ class StubGenerator: public StubCodeGenerator {
// The pending exception in Thread is converted into a Java-level exception. // The pending exception in Thread is converted into a Java-level exception.
// //
// Contract with Java-level exception handlers: // Contract with Java-level exception handlers:
// rax,: exception // rax: exception
// rdx: throwing pc // rdx: throwing pc
// //
// NOTE: At entry of this stub, exception-pc must be on stack !! // NOTE: At entry of this stub, exception-pc must be on stack !!
@ -377,6 +377,12 @@ class StubGenerator: public StubCodeGenerator {
address generate_forward_exception() { address generate_forward_exception() {
StubCodeMark mark(this, "StubRoutines", "forward exception"); StubCodeMark mark(this, "StubRoutines", "forward exception");
address start = __ pc(); address start = __ pc();
const Register thread = rcx;
// other registers used in this stub
const Register exception_oop = rax;
const Register handler_addr = rbx;
const Register exception_pc = rdx;
// Upon entry, the sp points to the return address returning into Java // Upon entry, the sp points to the return address returning into Java
// (interpreted or compiled) code; i.e., the return address becomes the // (interpreted or compiled) code; i.e., the return address becomes the
@ -389,8 +395,8 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT #ifdef ASSERT
// make sure this code is only executed if there is a pending exception // make sure this code is only executed if there is a pending exception
{ Label L; { Label L;
__ get_thread(rcx); __ get_thread(thread);
__ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD); __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, L); __ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (1)"); __ stop("StubRoutines::forward exception: no pending exception (1)");
__ bind(L); __ bind(L);
@ -398,33 +404,40 @@ class StubGenerator: public StubCodeGenerator {
#endif #endif
// compute exception handler into rbx, // compute exception handler into rbx,
__ movptr(rax, Address(rsp, 0)); __ get_thread(thread);
__ movptr(exception_pc, Address(rsp, 0));
BLOCK_COMMENT("call exception_handler_for_return_address"); BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
__ mov(rbx, rax); __ mov(handler_addr, rax);
// setup rax, & rdx, remove return address & clear pending exception // setup rax & rdx, remove return address & clear pending exception
__ get_thread(rcx); __ get_thread(thread);
__ pop(rdx); __ pop(exception_pc);
__ movptr(rax, Address(rcx, Thread::pending_exception_offset())); __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
__ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD); __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
#ifdef ASSERT #ifdef ASSERT
// make sure exception is set // make sure exception is set
{ Label L; { Label L;
__ testptr(rax, rax); __ testptr(exception_oop, exception_oop);
__ jcc(Assembler::notEqual, L); __ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (2)"); __ stop("StubRoutines::forward exception: no pending exception (2)");
__ bind(L); __ bind(L);
} }
#endif #endif
// Verify that there is really a valid exception in RAX.
__ verify_oop(exception_oop);
// Restore SP from BP if the exception PC is a MethodHandle call site.
__ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
__ cmovptr(Assembler::notEqual, rsp, rbp);
// continue at exception handler (return address removed) // continue at exception handler (return address removed)
// rax,: exception // rax: exception
// rbx,: exception handler // rbx: exception handler
// rdx: throwing pc // rdx: throwing pc
__ verify_oop(rax); __ jmp(handler_addr);
__ jmp(rbx);
return start; return start;
} }
@ -2263,16 +2276,6 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers // arraycopy stubs used by compilers
generate_arraycopy_stubs(); generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
generate_math_stubs(); generate_math_stubs();
} }

View file

@ -466,7 +466,7 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("call exception_handler_for_return_address"); BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address, __ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address), SharedRuntime::exception_handler_for_return_address),
c_rarg0); r15_thread, c_rarg0);
__ mov(rbx, rax); __ mov(rbx, rax);
// setup rax & rdx, remove return address & clear pending exception // setup rax & rdx, remove return address & clear pending exception
@ -3009,16 +3009,6 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers // arraycopy stubs used by compilers
generate_arraycopy_stubs(); generate_arraycopy_stubs();
// generic method handle stubs
if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
generate_math_stubs(); generate_math_stubs();
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,11 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small) code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
}; };
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
};
class x86 { class x86 {
friend class StubGenerator; friend class StubGenerator;
friend class VMStructs; friend class VMStructs;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,12 +28,14 @@
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
enum platform_dependent_constants enum platform_dependent_constants {
{ code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
code_size1 = 19000, // simply increase if too small (assembler will code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
// crash if too small) };
code_size2 = 22000 // simply increase if too small (assembler will
// crash if too small) // MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 13000
}; };
class x86 { class x86 {

View file

@ -1550,6 +1550,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
void TemplateInterpreterGenerator::generate_throw_exception() { void TemplateInterpreterGenerator::generate_throw_exception() {
// Entry point in previous activation (i.e., if the caller was interpreted) // Entry point in previous activation (i.e., if the caller was interpreted)
Interpreter::_rethrow_exception_entry = __ pc(); Interpreter::_rethrow_exception_entry = __ pc();
const Register thread = rcx;
// Restore sp to interpreter_frame_last_sp even though we are going // Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing. // to empty the expression stack for the exception processing.
@ -1598,10 +1599,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Set the popframe_processing bit in pending_popframe_condition indicating that we are // Set the popframe_processing bit in pending_popframe_condition indicating that we are
// currently handling popframe, so that call_VMs that may happen later do not trigger new // currently handling popframe, so that call_VMs that may happen later do not trigger new
// popframe handling cycles. // popframe handling cycles.
__ get_thread(rcx); __ get_thread(thread);
__ movl(rdx, Address(rcx, JavaThread::popframe_condition_offset())); __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
__ orl(rdx, JavaThread::popframe_processing_bit); __ orl(rdx, JavaThread::popframe_processing_bit);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), rdx); __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
{ {
// Check to see whether we are returning to a deoptimized frame. // Check to see whether we are returning to a deoptimized frame.
@ -1629,8 +1630,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ subptr(rdi, rax); __ subptr(rdi, rax);
__ addptr(rdi, wordSize); __ addptr(rdi, wordSize);
// Save these arguments // Save these arguments
__ get_thread(rcx); __ get_thread(thread);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);
__ remove_activation(vtos, rdx, __ remove_activation(vtos, rdx,
/* throw_monitor_exception */ false, /* throw_monitor_exception */ false,
@ -1638,8 +1639,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
/* notify_jvmdi */ false); /* notify_jvmdi */ false);
// Inform deoptimization that it is responsible for restoring these arguments // Inform deoptimization that it is responsible for restoring these arguments
__ get_thread(rcx); __ get_thread(thread);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit); __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
// Continue in deoptimization handler // Continue in deoptimization handler
__ jmp(rdx); __ jmp(rdx);
@ -1665,12 +1666,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// expression stack if necessary. // expression stack if necessary.
__ mov(rax, rsp); __ mov(rax, rsp);
__ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ get_thread(rcx); __ get_thread(thread);
// PC must point into interpreter here // PC must point into interpreter here
__ set_last_Java_frame(rcx, noreg, rbp, __ pc()); __ set_last_Java_frame(thread, noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
__ get_thread(rcx); __ get_thread(thread);
__ reset_last_Java_frame(rcx, true, true); __ reset_last_Java_frame(thread, true, true);
// Restore the last_sp and null it out // Restore the last_sp and null it out
__ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
@ -1684,8 +1685,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
} }
// Clear the popframe condition flag // Clear the popframe condition flag
__ get_thread(rcx); __ get_thread(thread);
__ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive); __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
__ dispatch_next(vtos); __ dispatch_next(vtos);
// end of PopFrame support // end of PopFrame support
@ -1694,27 +1695,27 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence // preserve exception over this code sequence
__ pop_ptr(rax); __ pop_ptr(rax);
__ get_thread(rcx); __ get_thread(thread);
__ movptr(Address(rcx, JavaThread::vm_result_offset()), rax); __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions) // remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false); __ remove_activation(vtos, rdx, false, true, false);
// restore exception // restore exception
__ get_thread(rcx); __ get_thread(thread);
__ movptr(rax, Address(rcx, JavaThread::vm_result_offset())); __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD); __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
__ verify_oop(rax); __ verify_oop(rax);
// Inbetween activations - previous activation type unknown yet // Inbetween activations - previous activation type unknown yet
// compute continuation point - the continuation point expects // compute continuation point - the continuation point expects
// the following registers set up: // the following registers set up:
// //
// rax,: exception // rax: exception
// rdx: return address/pc that threw exception // rdx: return address/pc that threw exception
// rsp: expression stack of caller // rsp: expression stack of caller
// rbp,: rbp, of caller // rbp: rbp, of caller
__ push(rax); // save exception __ push(rax); // save exception
__ push(rdx); // save return address __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
__ mov(rbx, rax); // save exception handler __ mov(rbx, rax); // save exception handler
__ pop(rdx); // restore return address __ pop(rdx); // restore return address
__ pop(rax); // restore exception __ pop(rax); // restore exception
@ -1728,6 +1729,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// //
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
address entry = __ pc(); address entry = __ pc();
const Register thread = rcx;
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
@ -1735,8 +1737,8 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_FPU_stack(); __ empty_FPU_stack();
__ load_earlyret_value(state); __ load_earlyret_value(state);
__ get_thread(rcx); __ get_thread(thread);
__ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset())); __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset()); const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state // Clear the earlyret state

View file

@ -1741,7 +1741,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ push(rdx); // save return address __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address), SharedRuntime::exception_handler_for_return_address),
rdx); r15_thread, rdx);
__ mov(rbx, rax); // save exception handler __ mov(rbx, rax); // save exception handler
__ pop(rdx); // restore return address __ pop(rdx); // restore return address
__ pop(rax); // restore exception __ pop(rax); // restore exception

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2915,12 +2915,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
__ andl(recv, 0xFF); __ andl(recv, 0xFF);
// recv count is 0 based? // recv count is 0 based?
Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)); Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
if (is_invokedynamic) { __ movptr(recv, recv_addr);
__ lea(recv, recv_addr); __ verify_oop(recv);
} else {
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
} }
// do null check if needed // do null check if needed

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2860,12 +2860,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
__ andl(recv, 0xFF); __ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2 if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)); Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
if (is_invokedynamic) { __ movptr(recv, recv_addr);
__ lea(recv, recv_addr); __ verify_oop(recv);
} else {
__ movptr(recv, recv_addr);
__ verify_oop(recv);
}
} }
// do null check if needed // do null check if needed

View file

@ -1,6 +1,6 @@
/* /*
* Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007, 2008 Red Hat, Inc. * Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,11 +29,10 @@
// //
define_pd_global(bool, DontYieldALot, false); define_pd_global(bool, DontYieldALot, false);
#ifdef _LP64
define_pd_global(intx, ThreadStackSize, 1536); define_pd_global(intx, ThreadStackSize, 1536);
#ifdef _LP64
define_pd_global(intx, VMThreadStackSize, 1024); define_pd_global(intx, VMThreadStackSize, 1024);
#else #else
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 512); define_pd_global(intx, VMThreadStackSize, 512);
#endif // _LP64 #endif // _LP64
define_pd_global(intx, SurvivorRatio, 8); define_pd_global(intx, SurvivorRatio, 8);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -222,11 +222,15 @@ void Canonicalizer::do_ArrayLength (ArrayLength* x) {
} }
} else { } else {
LoadField* lf = x->array()->as_LoadField(); LoadField* lf = x->array()->as_LoadField();
if (lf != NULL && lf->field()->is_constant()) { if (lf != NULL) {
ciObject* c = lf->field()->constant_value().as_object(); ciField* field = lf->field();
if (c->is_array()) { if (field->is_constant() && field->is_static()) {
ciArray* array = (ciArray*) c; // final static field
set_constant(array->length()); ciObject* c = field->constant_value().as_object();
if (c->is_array()) {
ciArray* array = (ciArray*) c;
set_constant(array->length());
}
} }
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -415,6 +415,28 @@ class PatchingStub: public CodeStub {
}; };
//------------------------------------------------------------------------------
// DeoptimizeStub
//
class DeoptimizeStub : public CodeStub {
private:
CodeEmitInfo* _info;
public:
DeoptimizeStub(CodeEmitInfo* info) : _info(new CodeEmitInfo(info)) {}
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
virtual bool is_exception_throw_stub() const { return true; }
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case(_info);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("DeoptimizeStub"); }
#endif // PRODUCT
};
class SimpleExceptionStub: public CodeStub { class SimpleExceptionStub: public CodeStub {
private: private:
LIR_Opr _obj; LIR_Opr _obj;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1524,18 +1524,14 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
code = Bytecodes::_invokespecial; code = Bytecodes::_invokespecial;
} }
if (code == Bytecodes::_invokedynamic) {
BAILOUT("invokedynamic NYI"); // FIXME
return;
}
// NEEDS_CLEANUP // NEEDS_CLEANUP
// I've added the target-is_loaded() test below but I don't really understand // I've added the target-is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false. // how klass->is_loaded() can be true and yet target->is_loaded() is false.
// this happened while running the JCK invokevirtual tests under doit. TKR // this happened while running the JCK invokevirtual tests under doit. TKR
ciMethod* cha_monomorphic_target = NULL; ciMethod* cha_monomorphic_target = NULL;
ciMethod* exact_target = NULL; ciMethod* exact_target = NULL;
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded()) { if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!target->is_method_handle_invoke()) {
Value receiver = NULL; Value receiver = NULL;
ciInstanceKlass* receiver_klass = NULL; ciInstanceKlass* receiver_klass = NULL;
bool type_is_exact = false; bool type_is_exact = false;
@ -1681,11 +1677,20 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
CHECK_BAILOUT(); CHECK_BAILOUT();
// inlining not successful => standard invoke // inlining not successful => standard invoke
bool is_static = code == Bytecodes::_invokestatic;
ValueType* result_type = as_ValueType(target->return_type());
Values* args = state()->pop_arguments(target->arg_size_no_receiver());
Value recv = is_static ? NULL : apop();
bool is_loaded = target->is_loaded(); bool is_loaded = target->is_loaded();
bool has_receiver =
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual ||
code == Bytecodes::_invokeinterface;
bool is_invokedynamic = code == Bytecodes::_invokedynamic;
ValueType* result_type = as_ValueType(target->return_type());
// We require the debug info to be the "state before" because
// invokedynamics may deoptimize.
ValueStack* state_before = is_invokedynamic ? state()->copy() : NULL;
Values* args = state()->pop_arguments(target->arg_size_no_receiver());
Value recv = has_receiver ? apop() : NULL;
int vtable_index = methodOopDesc::invalid_vtable_index; int vtable_index = methodOopDesc::invalid_vtable_index;
#ifdef SPARC #ifdef SPARC
@ -1723,7 +1728,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
profile_call(recv, target_klass); profile_call(recv, target_klass);
} }
Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target); Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
// push result // push result
append_split(result); append_split(result);
@ -2862,20 +2867,18 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
_initial_state = state_at_entry(); _initial_state = state_at_entry();
start_block->merge(_initial_state); start_block->merge(_initial_state);
BlockBegin* sync_handler = NULL; // setup an exception handler to do the unlocking and/or
if (method()->is_synchronized() || _compilation->env()->dtrace_method_probes()) { // notification and unwind the frame.
// setup an exception handler to do the unlocking and/or notification BlockBegin* sync_handler = new BlockBegin(-1);
sync_handler = new BlockBegin(-1); sync_handler->set(BlockBegin::exception_entry_flag);
sync_handler->set(BlockBegin::exception_entry_flag); sync_handler->set(BlockBegin::is_on_work_list_flag);
sync_handler->set(BlockBegin::is_on_work_list_flag); sync_handler->set(BlockBegin::default_exception_handler_flag);
sync_handler->set(BlockBegin::default_exception_handler_flag);
ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
XHandler* h = new XHandler(desc); XHandler* h = new XHandler(desc);
h->set_entry_block(sync_handler); h->set_entry_block(sync_handler);
scope_data()->xhandlers()->append(h); scope_data()->xhandlers()->append(h);
scope_data()->set_has_handler(); scope_data()->set_has_handler();
}
// complete graph // complete graph
_vmap = new ValueMap(); _vmap = new ValueMap();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -259,10 +259,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
} }
void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) { void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
// record the safepoint before recording the debug info for enclosing scopes // record the safepoint before recording the debug info for enclosing scopes
recorder->add_safepoint(pc_offset, _oop_map->deep_copy()); recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
_scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/); _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
recorder->end_safepoint(pc_offset); recorder->end_safepoint(pc_offset);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -242,7 +242,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
//Whether we should reexecute this bytecode for deopt //Whether we should reexecute this bytecode for deopt
bool should_reexecute(); bool should_reexecute();
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) { void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost, bool is_method_handle_invoke = false) {
if (caller() != NULL) { if (caller() != NULL) {
// Order is significant: Must record caller first. // Order is significant: Must record caller first.
caller()->record_debug_info(recorder, pc_offset, false/*topmost*/); caller()->record_debug_info(recorder, pc_offset, false/*topmost*/);
@ -252,7 +252,6 @@ class IRScopeDebugInfo: public CompilationResourceObj {
DebugToken* monvals = recorder->create_monitor_values(monitors()); DebugToken* monvals = recorder->create_monitor_values(monitors());
// reexecute allowed only for the topmost frame // reexecute allowed only for the topmost frame
bool reexecute = topmost ? should_reexecute() : false; bool reexecute = topmost ? should_reexecute() : false;
bool is_method_handle_invoke = false;
bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis. bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals); recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
} }
@ -303,7 +302,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; } int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr); void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset); void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
CodeEmitInfo* next() const { return _next; } CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; } void set_next(CodeEmitInfo* next) { _next = next; }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -334,13 +334,14 @@ void Intrinsic::state_values_do(void f(Value*)) {
Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args, Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
int vtable_index, ciMethod* target) int vtable_index, ciMethod* target, ValueStack* state_before)
: StateSplit(result_type) : StateSplit(result_type)
, _code(code) , _code(code)
, _recv(recv) , _recv(recv)
, _args(args) , _args(args)
, _vtable_index(vtable_index) , _vtable_index(vtable_index)
, _target(target) , _target(target)
, _state_before(state_before)
{ {
set_flag(TargetIsLoadedFlag, target->is_loaded()); set_flag(TargetIsLoadedFlag, target->is_loaded());
set_flag(TargetIsFinalFlag, target_is_loaded() && target->is_final_method()); set_flag(TargetIsFinalFlag, target_is_loaded() && target->is_final_method());
@ -355,6 +356,9 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
_signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0)); _signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
if (has_receiver()) { if (has_receiver()) {
_signature->append(as_BasicType(receiver()->type())); _signature->append(as_BasicType(receiver()->type()));
} else if (is_invokedynamic()) {
// Add the synthetic MethodHandle argument to the signature.
_signature->append(T_OBJECT);
} }
for (int i = 0; i < number_of_arguments(); i++) { for (int i = 0; i < number_of_arguments(); i++) {
ValueType* t = argument_at(i)->type(); ValueType* t = argument_at(i)->type();
@ -364,6 +368,13 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
} }
void Invoke::state_values_do(void f(Value*)) {
StateSplit::state_values_do(f);
if (state_before() != NULL) state_before()->values_do(f);
if (state() != NULL) state()->values_do(f);
}
// Implementation of Contant // Implementation of Contant
intx Constant::hash() const { intx Constant::hash() const {
if (_state == NULL) { if (_state == NULL) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1134,17 +1134,18 @@ BASE(StateSplit, Instruction)
LEAF(Invoke, StateSplit) LEAF(Invoke, StateSplit)
private: private:
Bytecodes::Code _code; Bytecodes::Code _code;
Value _recv; Value _recv;
Values* _args; Values* _args;
BasicTypeList* _signature; BasicTypeList* _signature;
int _vtable_index; int _vtable_index;
ciMethod* _target; ciMethod* _target;
ValueStack* _state_before; // Required for deoptimization.
public: public:
// creation // creation
Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args, Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
int vtable_index, ciMethod* target); int vtable_index, ciMethod* target, ValueStack* state_before);
// accessors // accessors
Bytecodes::Code code() const { return _code; } Bytecodes::Code code() const { return _code; }
@ -1155,6 +1156,7 @@ LEAF(Invoke, StateSplit)
int vtable_index() const { return _vtable_index; } int vtable_index() const { return _vtable_index; }
BasicTypeList* signature() const { return _signature; } BasicTypeList* signature() const { return _signature; }
ciMethod* target() const { return _target; } ciMethod* target() const { return _target; }
ValueStack* state_before() const { return _state_before; }
// Returns false if target is not loaded // Returns false if target is not loaded
bool target_is_final() const { return check_flag(TargetIsFinalFlag); } bool target_is_final() const { return check_flag(TargetIsFinalFlag); }
@ -1162,6 +1164,9 @@ LEAF(Invoke, StateSplit)
// Returns false if target is not loaded // Returns false if target is not loaded
bool target_is_strictfp() const { return check_flag(TargetIsStrictfpFlag); } bool target_is_strictfp() const { return check_flag(TargetIsStrictfpFlag); }
// JSR 292 support
bool is_invokedynamic() const { return code() == Bytecodes::_invokedynamic; }
// generic // generic
virtual bool can_trap() const { return true; } virtual bool can_trap() const { return true; }
virtual void input_values_do(void f(Value*)) { virtual void input_values_do(void f(Value*)) {
@ -1169,6 +1174,7 @@ LEAF(Invoke, StateSplit)
if (has_receiver()) f(&_recv); if (has_receiver()) f(&_recv);
for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i)); for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
} }
virtual void state_values_do(void f(Value*));
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -76,7 +76,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
return LIR_OprFact::oopConst(type->as_ObjectType()->encoding()); return LIR_OprFact::oopConst(type->as_ObjectType()->encoding());
} }
} }
case addressTag: return LIR_OprFact::intConst(type->as_AddressConstant()->value()); case addressTag: return LIR_OprFact::addressConst(type->as_AddressConstant()->value());
case intTag : return LIR_OprFact::intConst(type->as_IntConstant()->value()); case intTag : return LIR_OprFact::intConst(type->as_IntConstant()->value());
case floatTag : return LIR_OprFact::floatConst(type->as_FloatConstant()->value()); case floatTag : return LIR_OprFact::floatConst(type->as_FloatConstant()->value());
case longTag : return LIR_OprFact::longConst(type->as_LongConstant()->value()); case longTag : return LIR_OprFact::longConst(type->as_LongConstant()->value());
@ -89,7 +89,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) { LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
switch (type->tag()) { switch (type->tag()) {
case objectTag: return LIR_OprFact::oopConst(NULL); case objectTag: return LIR_OprFact::oopConst(NULL);
case addressTag: case addressTag:return LIR_OprFact::addressConst(0);
case intTag: return LIR_OprFact::intConst(0); case intTag: return LIR_OprFact::intConst(0);
case floatTag: return LIR_OprFact::floatConst(0.0); case floatTag: return LIR_OprFact::floatConst(0.0);
case longTag: return LIR_OprFact::longConst(0); case longTag: return LIR_OprFact::longConst(0);
@ -689,9 +689,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_static_call: case lir_static_call:
case lir_optvirtual_call: case lir_optvirtual_call:
case lir_icvirtual_call: case lir_icvirtual_call:
case lir_virtual_call: { case lir_virtual_call:
assert(op->as_OpJavaCall() != NULL, "must be"); case lir_dynamic_call: {
LIR_OpJavaCall* opJavaCall = (LIR_OpJavaCall*)op; LIR_OpJavaCall* opJavaCall = op->as_OpJavaCall();
assert(opJavaCall != NULL, "must be");
if (opJavaCall->_receiver->is_valid()) do_input(opJavaCall->_receiver); if (opJavaCall->_receiver->is_valid()) do_input(opJavaCall->_receiver);
@ -704,6 +705,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
} }
if (opJavaCall->_info) do_info(opJavaCall->_info); if (opJavaCall->_info) do_info(opJavaCall->_info);
if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
do_call(); do_call();
if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result); if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);
@ -1410,6 +1412,7 @@ void LIR_OprDesc::print(outputStream* out) const {
// LIR_Address // LIR_Address
void LIR_Const::print_value_on(outputStream* out) const { void LIR_Const::print_value_on(outputStream* out) const {
switch (type()) { switch (type()) {
case T_ADDRESS:out->print("address:%d",as_jint()); break;
case T_INT: out->print("int:%d", as_jint()); break; case T_INT: out->print("int:%d", as_jint()); break;
case T_LONG: out->print("lng:%lld", as_jlong()); break; case T_LONG: out->print("lng:%lld", as_jlong()); break;
case T_FLOAT: out->print("flt:%f", as_jfloat()); break; case T_FLOAT: out->print("flt:%f", as_jfloat()); break;
@ -1590,6 +1593,7 @@ const char * LIR_Op::name() const {
case lir_optvirtual_call: s = "optvirtual"; break; case lir_optvirtual_call: s = "optvirtual"; break;
case lir_icvirtual_call: s = "icvirtual"; break; case lir_icvirtual_call: s = "icvirtual"; break;
case lir_virtual_call: s = "virtual"; break; case lir_virtual_call: s = "virtual"; break;
case lir_dynamic_call: s = "dynamic"; break;
// LIR_OpArrayCopy // LIR_OpArrayCopy
case lir_arraycopy: s = "arraycopy"; break; case lir_arraycopy: s = "arraycopy"; break;
// LIR_OpLock // LIR_OpLock

View file

@ -85,9 +85,10 @@ class LIR_Const: public LIR_OprPtr {
void type_check(BasicType t) const { assert(type() == t, "type check"); } void type_check(BasicType t) const { assert(type() == t, "type check"); }
void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); } void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); }
void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
public: public:
LIR_Const(jint i) { _value.set_type(T_INT); _value.set_jint(i); } LIR_Const(jint i, bool is_address=false) { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); } LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); }
LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); } LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); }
LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); } LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); }
@ -105,7 +106,7 @@ class LIR_Const: public LIR_OprPtr {
virtual BasicType type() const { return _value.get_type(); } virtual BasicType type() const { return _value.get_type(); }
virtual LIR_Const* as_constant() { return this; } virtual LIR_Const* as_constant() { return this; }
jint as_jint() const { type_check(T_INT ); return _value.get_jint(); } jint as_jint() const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); } jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); }
jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); } jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); }
jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); } jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); }
@ -120,7 +121,7 @@ class LIR_Const: public LIR_OprPtr {
#endif #endif
jint as_jint_bits() const { type_check(T_FLOAT, T_INT); return _value.get_jint(); } jint as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
jint as_jint_lo_bits() const { jint as_jint_lo_bits() const {
if (type() == T_DOUBLE) { if (type() == T_DOUBLE) {
return low(jlong_cast(_value.get_jdouble())); return low(jlong_cast(_value.get_jdouble()));
@ -718,6 +719,7 @@ class LIR_OprFact: public AllStatic {
static LIR_Opr intptrConst(void* p) { return (LIR_Opr)(new LIR_Const(p)); } static LIR_Opr intptrConst(void* p) { return (LIR_Opr)(new LIR_Const(p)); }
static LIR_Opr intptrConst(intptr_t v) { return (LIR_Opr)(new LIR_Const((void*)v)); } static LIR_Opr intptrConst(intptr_t v) { return (LIR_Opr)(new LIR_Const((void*)v)); }
static LIR_Opr illegal() { return (LIR_Opr)-1; } static LIR_Opr illegal() { return (LIR_Opr)-1; }
static LIR_Opr addressConst(jint i) { return (LIR_Opr)(new LIR_Const(i, true)); }
static LIR_Opr value_type(ValueType* type); static LIR_Opr value_type(ValueType* type);
static LIR_Opr dummy_value_type(ValueType* type); static LIR_Opr dummy_value_type(ValueType* type);
@ -840,6 +842,7 @@ enum LIR_Code {
, lir_optvirtual_call , lir_optvirtual_call
, lir_icvirtual_call , lir_icvirtual_call
, lir_virtual_call , lir_virtual_call
, lir_dynamic_call
, end_opJavaCall , end_opJavaCall
, begin_opArrayCopy , begin_opArrayCopy
, lir_arraycopy , lir_arraycopy
@ -1052,6 +1055,16 @@ class LIR_OpJavaCall: public LIR_OpCall {
LIR_Opr receiver() const { return _receiver; } LIR_Opr receiver() const { return _receiver; }
ciMethod* method() const { return _method; } ciMethod* method() const { return _method; }
// JSR 292 support.
bool is_invokedynamic() const { return code() == lir_dynamic_call; }
bool is_method_handle_invoke() const {
return
is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
||
(method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
method()->name() == ciSymbol::invoke_name());
}
intptr_t vtable_offset() const { intptr_t vtable_offset() const {
assert(_code == lir_virtual_call, "only have vtable for real vcall"); assert(_code == lir_virtual_call, "only have vtable for real vcall");
return (intptr_t) addr(); return (intptr_t) addr();
@ -1766,6 +1779,10 @@ class LIR_List: public CompilationResourceObj {
intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) { intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info)); append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
} }
void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
}
void get_thread(LIR_Opr result) { append(new LIR_Op0(lir_get_thread, result)); } void get_thread(LIR_Opr result) { append(new LIR_Op0(lir_get_thread, result)); }
void word_align() { append(new LIR_Op0(lir_word_align)); } void word_align() { append(new LIR_Op0(lir_word_align)); }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
} }
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) { void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
flush_debug_info(pc_offset); flush_debug_info(pc_offset);
cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset); cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
if (cinfo->exception_handlers() != NULL) { if (cinfo->exception_handlers() != NULL) {
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers()); compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
} }
@ -413,6 +413,12 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) { void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
verify_oop_map(op->info()); verify_oop_map(op->info());
// JSR 292
// Preserve the SP over MethodHandle call sites.
if (op->is_method_handle_invoke()) {
preserve_SP(op);
}
if (os::is_MP()) { if (os::is_MP()) {
// must align calls sites, otherwise they can't be updated atomically on MP hardware // must align calls sites, otherwise they can't be updated atomically on MP hardware
align_call(op->code()); align_call(op->code());
@ -423,19 +429,25 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
switch (op->code()) { switch (op->code()) {
case lir_static_call: case lir_static_call:
call(op->addr(), relocInfo::static_call_type, op->info()); call(op, relocInfo::static_call_type);
break; break;
case lir_optvirtual_call: case lir_optvirtual_call:
call(op->addr(), relocInfo::opt_virtual_call_type, op->info()); case lir_dynamic_call:
call(op, relocInfo::opt_virtual_call_type);
break; break;
case lir_icvirtual_call: case lir_icvirtual_call:
ic_call(op->addr(), op->info()); ic_call(op);
break; break;
case lir_virtual_call: case lir_virtual_call:
vtable_call(op->vtable_offset(), op->info()); vtable_call(op);
break; break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
if (op->is_method_handle_invoke()) {
restore_SP(op);
}
#if defined(X86) && defined(TIERED) #if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it // C2 leave fpu stack dirty clean it
if (UseSSE < 2) { if (UseSSE < 2) {

View file

@ -82,7 +82,7 @@ class LIR_Assembler: public CompilationResourceObj {
Address as_Address_hi(LIR_Address* addr); Address as_Address_hi(LIR_Address* addr);
// debug information // debug information
void add_call_info(int pc_offset, CodeEmitInfo* cinfo); void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
void add_debug_info_for_branch(CodeEmitInfo* info); void add_debug_info_for_branch(CodeEmitInfo* info);
void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo); void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
void add_debug_info_for_div0_here(CodeEmitInfo* info); void add_debug_info_for_div0_here(CodeEmitInfo* info);
@ -205,9 +205,13 @@ class LIR_Assembler: public CompilationResourceObj {
void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op); void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result); void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result);
void ic_call(address destination, CodeEmitInfo* info); void call( LIR_OpJavaCall* op, relocInfo::relocType rtype);
void vtable_call(int vtable_offset, CodeEmitInfo* info); void ic_call( LIR_OpJavaCall* op);
void call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info); void vtable_call( LIR_OpJavaCall* op);
// JSR 292
void preserve_SP(LIR_OpJavaCall* op);
void restore_SP( LIR_OpJavaCall* op);
void osr_entry(); void osr_entry();

View file

@ -2284,7 +2284,7 @@ void LIRGenerator::do_OsrEntry(OsrEntry* x) {
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) { void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
int i = x->has_receiver() ? 1 : 0; int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
for (; i < args->length(); i++) { for (; i < args->length(); i++) {
LIRItem* param = args->at(i); LIRItem* param = args->at(i);
LIR_Opr loc = arg_list->at(i); LIR_Opr loc = arg_list->at(i);
@ -2322,6 +2322,10 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
LIRItem* receiver = new LIRItem(x->receiver(), this); LIRItem* receiver = new LIRItem(x->receiver(), this);
argument_items->append(receiver); argument_items->append(receiver);
} }
if (x->is_invokedynamic()) {
// Insert a dummy for the synthetic MethodHandle argument.
argument_items->append(NULL);
}
int idx = x->has_receiver() ? 1 : 0; int idx = x->has_receiver() ? 1 : 0;
for (int i = 0; i < x->number_of_arguments(); i++) { for (int i = 0; i < x->number_of_arguments(); i++) {
LIRItem* param = new LIRItem(x->argument_at(i), this); LIRItem* param = new LIRItem(x->argument_at(i), this);
@ -2371,6 +2375,9 @@ void LIRGenerator::do_Invoke(Invoke* x) {
CodeEmitInfo* info = state_for(x, x->state()); CodeEmitInfo* info = state_for(x, x->state());
// invokedynamics can deoptimize.
CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
invoke_load_arguments(x, args, arg_list); invoke_load_arguments(x, args, arg_list);
if (x->has_receiver()) { if (x->has_receiver()) {
@ -2407,6 +2414,47 @@ void LIRGenerator::do_Invoke(Invoke* x) {
__ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info); __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
} }
break; break;
case Bytecodes::_invokedynamic: {
ciBytecodeStream bcs(x->scope()->method());
bcs.force_bci(x->bci());
assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
ciCPCache* cpcache = bcs.get_cpcache();
// Get CallSite offset from constant pool cache pointer.
int index = bcs.get_method_index();
size_t call_site_offset = cpcache->get_f1_offset(index);
// If this invokedynamic call site hasn't been executed yet in
// the interpreter, the CallSite object in the constant pool
// cache is still null and we need to deoptimize.
if (cpcache->is_f1_null_at(index)) {
// Cannot re-use same xhandlers for multiple CodeEmitInfos, so
// clone all handlers. This is handled transparently in other
// places by the CodeEmitInfo cloning logic but is handled
// specially here because a stub isn't being used.
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
__ jump(deopt_stub);
}
// Use the receiver register for the synthetic MethodHandle
// argument.
receiver = LIR_Assembler::receiverOpr();
LIR_Opr tmp = new_register(objectType);
// Load CallSite object from constant pool cache.
__ oop2reg(cpcache->constant_encoding(), tmp);
__ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ call_dynamic(x->target(), receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),
arg_list, info);
break;
}
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
break; break;

View file

@ -2479,6 +2479,15 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
return 2; return 2;
} }
case T_ADDRESS: {
#ifdef _LP64
scope_values->append(new ConstantLongValue(c->as_jint()));
#else
scope_values->append(new ConstantIntValue(c->as_jint()));
#endif
return 1;
}
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
return -1; return -1;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ class C1_MacroAssembler: public MacroAssembler {
void inline_cache_check(Register receiver, Register iCache); void inline_cache_check(Register receiver, Register iCache);
void build_frame(int frame_size_in_bytes); void build_frame(int frame_size_in_bytes);
void method_exit(bool restore_frame); void remove_frame(int frame_size_in_bytes);
void unverified_entry(Register receiver, Register ic_klass); void unverified_entry(Register receiver, Register ic_klass);
void verified_entry(); void verified_entry();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2009-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -40,6 +40,16 @@ size_t ciCPCache::get_f1_offset(int index) {
} }
// ------------------------------------------------------------------
// ciCPCache::is_f1_null_at
bool ciCPCache::is_f1_null_at(int index) {
VM_ENTRY_MARK;
constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop();
oop f1 = cpcache->secondary_entry_at(index)->f1();
return (f1 == NULL);
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciCPCache::print // ciCPCache::print
// //

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2009-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,5 +39,7 @@ public:
// requested entry. // requested entry.
size_t get_f1_offset(int index); size_t get_f1_offset(int index);
bool is_f1_null_at(int index);
void print(); void print();
}; };

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -249,7 +249,6 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
size += round_to(buffer_size, oopSize); size += round_to(buffer_size, oopSize);
assert(name != NULL, "must provide a name"); assert(name != NULL, "must provide a name");
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size); blob = new (size) BufferBlob(name, size);
} }
@ -271,7 +270,6 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
unsigned int size = allocation_size(cb, sizeof(BufferBlob)); unsigned int size = allocation_size(cb, sizeof(BufferBlob));
assert(name != NULL, "must provide a name"); assert(name != NULL, "must provide a name");
{ {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size, cb); blob = new (size) BufferBlob(name, size, cb);
} }
@ -298,10 +296,48 @@ void BufferBlob::free( BufferBlob *blob ) {
MemoryService::track_code_cache_memory_usage(); MemoryService::track_code_cache_memory_usage();
} }
bool BufferBlob::is_adapter_blob() const {
return (strcmp(AdapterHandlerEntry::name, name()) == 0); //----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
AdapterBlob* blob = NULL;
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
return blob;
} }
//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob
MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
MethodHandlesAdapterBlob* blob = NULL;
unsigned int size = sizeof(MethodHandlesAdapterBlob);
// align the size to CodeEntryAlignment
size = align_code_offset(size);
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) MethodHandlesAdapterBlob(size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
return blob;
}
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub // Implementation of RuntimeStub

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -90,14 +90,15 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
void flush(); void flush();
// Typing // Typing
virtual bool is_buffer_blob() const { return false; } virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; } virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; } virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; } virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; } virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; } virtual bool is_exception_stub() const { return false; }
virtual bool is_safepoint_stub() const { return false; } virtual bool is_safepoint_stub() const { return false; }
virtual bool is_adapter_blob() const { return false; } virtual bool is_adapter_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled_by_c2() const { return false; } virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; } virtual bool is_compiled_by_c1() const { return false; }
@ -221,6 +222,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
class BufferBlob: public CodeBlob { class BufferBlob: public CodeBlob {
friend class VMStructs; friend class VMStructs;
friend class AdapterBlob;
friend class MethodHandlesAdapterBlob;
private: private:
// Creation support // Creation support
BufferBlob(const char* name, int size); BufferBlob(const char* name, int size);
@ -236,8 +240,7 @@ class BufferBlob: public CodeBlob {
static void free(BufferBlob* buf); static void free(BufferBlob* buf);
// Typing // Typing
bool is_buffer_blob() const { return true; } virtual bool is_buffer_blob() const { return true; }
bool is_adapter_blob() const;
// GC/Verification support // GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ } void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { /* nothing to do */ }
@ -254,6 +257,40 @@ class BufferBlob: public CodeBlob {
}; };
//----------------------------------------------------------------------------------------------------
// AdapterBlob: used to hold C2I/I2C adapters
class AdapterBlob: public BufferBlob {
private:
AdapterBlob(int size) : BufferBlob("I2C/C2I adapters", size) {}
AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
public:
// Creation
static AdapterBlob* create(CodeBuffer* cb);
// Typing
virtual bool is_adapter_blob() const { return true; }
};
//----------------------------------------------------------------------------------------------------
// MethodHandlesAdapterBlob: used to hold MethodHandles adapters
class MethodHandlesAdapterBlob: public BufferBlob {
private:
MethodHandlesAdapterBlob(int size) : BufferBlob("MethodHandles adapters", size) {}
MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
public:
// Creation
static MethodHandlesAdapterBlob* create(int buffer_size);
// Typing
virtual bool is_method_handles_adapter_blob() const { return true; }
};
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine // RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine

View file

@ -988,10 +988,12 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
} }
if (method->is_not_compilable(comp_level)) return NULL; if (method->is_not_compilable(comp_level)) return NULL;
nmethod* saved = CodeCache::find_and_remove_saved_code(method()); if (UseCodeCacheFlushing) {
if (saved != NULL) { nmethod* saved = CodeCache::find_and_remove_saved_code(method());
method->set_code(method, saved); if (saved != NULL) {
return saved; method->set_code(method, saved);
return saved;
}
} }
} else { } else {

View file

@ -3704,7 +3704,14 @@ void CMTask::do_marking_step(double time_target_ms) {
// enough to point to the next possible object header (the // enough to point to the next possible object header (the
// bitmap knows by how much we need to move it as it knows its // bitmap knows by how much we need to move it as it knows its
// granularity). // granularity).
move_finger_to(_nextMarkBitMap->nextWord(_finger)); assert(_finger < _region_limit, "invariant");
HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
// Check if bitmap iteration was aborted while scanning the last object
if (new_finger >= _region_limit) {
giveup_current_region();
} else {
move_finger_to(new_finger);
}
} }
} }
// At this point we have either completed iterating over the // At this point we have either completed iterating over the

View file

@ -24,8 +24,8 @@
class G1CollectedHeap; class G1CollectedHeap;
class CMTask; class CMTask;
typedef GenericTaskQueue<oop> CMTaskQueue; typedef GenericTaskQueue<oop> CMTaskQueue;
typedef GenericTaskQueueSet<oop> CMTaskQueueSet; typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
// A generic CM bit map. This is essentially a wrapper around the BitMap // A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords. // class, with one bit per (1<<_shifter) HeapWords.

View file

@ -2102,18 +2102,21 @@ size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
// Return the remaining space in the cur alloc region, but not less than // Return the remaining space in the cur alloc region, but not less than
// the min TLAB size. // the min TLAB size.
// Also, no more than half the region size, since we can't allow tlabs to
// grow big enough to accomodate humongous objects.
// We need to story it locally, since it might change between when we // Also, this value can be at most the humongous object threshold,
// test for NULL and when we use it later. // since we can't allow tlabs to grow big enough to accomodate
// humongous objects.
// We need to store the cur alloc region locally, since it might change
// between when we test for NULL and when we use it later.
ContiguousSpace* cur_alloc_space = _cur_alloc_region; ContiguousSpace* cur_alloc_space = _cur_alloc_region;
size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
if (cur_alloc_space == NULL) { if (cur_alloc_space == NULL) {
return HeapRegion::GrainBytes/2; return max_tlab_size;
} else { } else {
return MAX2(MIN2(cur_alloc_space->free(), return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
(size_t)(HeapRegion::GrainBytes/2)), max_tlab_size);
(size_t)MinTLABSize);
} }
} }

View file

@ -56,8 +56,8 @@ class ConcurrentZFThread;
# define IF_G1_DETAILED_STATS(code) # define IF_G1_DETAILED_STATS(code)
#endif #endif
typedef GenericTaskQueue<StarTask> RefToScanQueue; typedef GenericTaskQueue<StarTask> RefToScanQueue;
typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet; typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
@ -1055,7 +1055,12 @@ public:
// Returns "true" iff the given word_size is "very large". // Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) { static bool isHumongous(size_t word_size) {
return word_size >= _humongous_object_threshold_in_words; // Note this has to be strictly greater-than as the TLABs
// are capped at the humongous thresold and we want to
// ensure that we don't try to allocate a TLAB as
// humongous and that we don't allocate a humongous
// object in a TLAB.
return word_size > _humongous_object_threshold_in_words;
} }
// Update mod union table with the set of dirty cards. // Update mod union table with the set of dirty cards.

View file

@ -101,6 +101,8 @@ void G1MarkSweep::allocate_stacks() {
GenMarkSweep::_marking_stack = GenMarkSweep::_marking_stack =
new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
GenMarkSweep::_objarray_stack =
new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
int size = SystemDictionary::number_of_classes() * 2; int size = SystemDictionary::number_of_classes() * 2;
GenMarkSweep::_revisit_klass_stack = GenMarkSweep::_revisit_klass_stack =

View file

@ -175,6 +175,7 @@ psAdaptiveSizePolicy.hpp gcUtil.hpp
psAdaptiveSizePolicy.hpp adaptiveSizePolicy.hpp psAdaptiveSizePolicy.hpp adaptiveSizePolicy.hpp
psCompactionManager.cpp gcTaskManager.hpp psCompactionManager.cpp gcTaskManager.hpp
psCompactionManager.cpp objArrayKlass.inline.hpp
psCompactionManager.cpp objectStartArray.hpp psCompactionManager.cpp objectStartArray.hpp
psCompactionManager.cpp oop.hpp psCompactionManager.cpp oop.hpp
psCompactionManager.cpp oop.inline.hpp psCompactionManager.cpp oop.inline.hpp
@ -189,6 +190,9 @@ psCompactionManager.cpp systemDictionary.hpp
psCompactionManager.hpp allocation.hpp psCompactionManager.hpp allocation.hpp
psCompactionManager.hpp taskqueue.hpp psCompactionManager.hpp taskqueue.hpp
psCompactionManager.inline.hpp psCompactionManager.hpp
psCompactionManager.inline.hpp psParallelCompact.hpp
psGCAdaptivePolicyCounters.hpp gcAdaptivePolicyCounters.hpp psGCAdaptivePolicyCounters.hpp gcAdaptivePolicyCounters.hpp
psGCAdaptivePolicyCounters.hpp gcPolicyCounters.hpp psGCAdaptivePolicyCounters.hpp gcPolicyCounters.hpp
psGCAdaptivePolicyCounters.hpp psAdaptiveSizePolicy.hpp psGCAdaptivePolicyCounters.hpp psAdaptiveSizePolicy.hpp
@ -379,12 +383,12 @@ pcTasks.cpp fprofiler.hpp
pcTasks.cpp jniHandles.hpp pcTasks.cpp jniHandles.hpp
pcTasks.cpp jvmtiExport.hpp pcTasks.cpp jvmtiExport.hpp
pcTasks.cpp management.hpp pcTasks.cpp management.hpp
pcTasks.cpp objArrayKlass.inline.hpp
pcTasks.cpp psParallelCompact.hpp pcTasks.cpp psParallelCompact.hpp
pcTasks.cpp pcTasks.hpp pcTasks.cpp pcTasks.hpp
pcTasks.cpp oop.inline.hpp pcTasks.cpp oop.inline.hpp
pcTasks.cpp oop.pcgc.inline.hpp pcTasks.cpp oop.pcgc.inline.hpp
pcTasks.cpp systemDictionary.hpp pcTasks.cpp systemDictionary.hpp
pcTasks.cpp taskqueue.hpp
pcTasks.cpp thread.hpp pcTasks.cpp thread.hpp
pcTasks.cpp universe.hpp pcTasks.cpp universe.hpp
pcTasks.cpp vmThread.hpp pcTasks.cpp vmThread.hpp

View file

@ -48,7 +48,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
_vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs); _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
// Do the real work // Do the real work
cm->drain_marking_stacks(&mark_and_push_closure); cm->follow_marking_stacks();
} }
@ -118,7 +118,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
} }
// Do the real work // Do the real work
cm->drain_marking_stacks(&mark_and_push_closure); cm->follow_marking_stacks();
// cm->deallocate_stacks(); // cm->deallocate_stacks();
} }
@ -196,17 +196,19 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
oop obj = NULL; oop obj = NULL;
ObjArrayTask task;
int random_seed = 17; int random_seed = 17;
while(true) { do {
if (ParCompactionManager::steal(which, &random_seed, obj)) { while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
obj->follow_contents(cm); objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
cm->drain_marking_stacks(&mark_and_push_closure); k->oop_follow_contents(cm, task.obj(), task.index());
} else { cm->follow_marking_stacks();
if (terminator()->offer_termination()) {
break;
}
} }
} while (ParCompactionManager::steal(which, &random_seed, obj)) {
obj->follow_contents(cm);
cm->follow_marking_stacks();
}
} while (!terminator()->offer_termination());
} }
// //

View file

@ -28,6 +28,8 @@
PSOldGen* ParCompactionManager::_old_gen = NULL; PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL; ParCompactionManager** ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL; OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL; ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL; ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL; RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
@ -46,6 +48,11 @@ ParCompactionManager::ParCompactionManager() :
// We want the overflow stack to be permanent // We want the overflow stack to be permanent
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true); _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
_objarray_queue.initialize();
_objarray_overflow_stack =
new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
#ifdef USE_RegionTaskQueueWithOverflow #ifdef USE_RegionTaskQueueWithOverflow
region_stack()->initialize(); region_stack()->initialize();
#else #else
@ -69,6 +76,7 @@ ParCompactionManager::ParCompactionManager() :
ParCompactionManager::~ParCompactionManager() { ParCompactionManager::~ParCompactionManager() {
delete _overflow_stack; delete _overflow_stack;
delete _objarray_overflow_stack;
delete _revisit_klass_stack; delete _revisit_klass_stack;
delete _revisit_mdo_stack; delete _revisit_mdo_stack;
// _manager_array and _stack_array are statics // _manager_array and _stack_array are statics
@ -86,18 +94,21 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
assert(_manager_array == NULL, "Attempt to initialize twice"); assert(_manager_array == NULL, "Attempt to initialize twice");
_manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 ); _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
guarantee(_manager_array != NULL, "Could not initialize promotion manager"); guarantee(_manager_array != NULL, "Could not allocate manager_array");
_stack_array = new OopTaskQueueSet(parallel_gc_threads); _stack_array = new OopTaskQueueSet(parallel_gc_threads);
guarantee(_stack_array != NULL, "Count not initialize promotion manager"); guarantee(_stack_array != NULL, "Could not allocate stack_array");
_objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
_region_array = new RegionTaskQueueSet(parallel_gc_threads); _region_array = new RegionTaskQueueSet(parallel_gc_threads);
guarantee(_region_array != NULL, "Count not initialize promotion manager"); guarantee(_region_array != NULL, "Could not allocate region_array");
// Create and register the ParCompactionManager(s) for the worker threads. // Create and register the ParCompactionManager(s) for the worker threads.
for(uint i=0; i<parallel_gc_threads; i++) { for(uint i=0; i<parallel_gc_threads; i++) {
_manager_array[i] = new ParCompactionManager(); _manager_array[i] = new ParCompactionManager();
guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager"); guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
stack_array()->register_queue(i, _manager_array[i]->marking_stack()); stack_array()->register_queue(i, _manager_array[i]->marking_stack());
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
#ifdef USE_RegionTaskQueueWithOverflow #ifdef USE_RegionTaskQueueWithOverflow
region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue()); region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else #else
@ -203,36 +214,30 @@ void ParCompactionManager::reset() {
} }
} }
void ParCompactionManager::drain_marking_stacks(OopClosure* blk) { void ParCompactionManager::follow_marking_stacks() {
#ifdef ASSERT
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
MutableSpace* to_space = heap->young_gen()->to_space();
MutableSpace* old_space = heap->old_gen()->object_space();
MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */
do { do {
// Drain the overflow stack first, to allow stealing from the marking stack.
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack()->is_empty()) {
oop obj = overflow_stack()->pop();
obj->follow_contents(this);
}
oop obj; oop obj;
// obj is a reference!!! while (!overflow_stack()->is_empty()) {
overflow_stack()->pop()->follow_contents(this);
}
while (marking_stack()->pop_local(obj)) { while (marking_stack()->pop_local(obj)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
obj->follow_contents(this); obj->follow_contents(this);
} }
} while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
assert(marking_stack()->size() == 0, "Sanity"); // Process ObjArrays one at a time to avoid marking stack bloat.
assert(overflow_stack()->length() == 0, "Sanity"); ObjArrayTask task;
if (!_objarray_overflow_stack->is_empty()) {
task = _objarray_overflow_stack->pop();
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(this, task.obj(), task.index());
} else if (_objarray_queue.pop_local(task)) {
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(this, task.obj(), task.index());
}
} while (!marking_stacks_empty());
assert(marking_stacks_empty(), "Sanity");
} }
void ParCompactionManager::drain_region_overflow_stack() { void ParCompactionManager::drain_region_overflow_stack() {

View file

@ -22,18 +22,6 @@
* *
*/ */
//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread local data only.
//
// NOTE! Be carefull when allocating the stacks on cheap. If you are going
// to use a promotion manager in more than one thread, the stacks MUST be
// on cheap. This can lead to memory leaks, though, as they are not auto
// deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
// Move to some global location // Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d #define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location // End move to some global location
@ -46,8 +34,6 @@ class ObjectStartArray;
class ParallelCompactData; class ParallelCompactData;
class ParMarkBitMap; class ParMarkBitMap;
// Move to it's own file if this works out.
class ParCompactionManager : public CHeapObj { class ParCompactionManager : public CHeapObj {
friend class ParallelTaskTerminator; friend class ParallelTaskTerminator;
friend class ParMarkBitMap; friend class ParMarkBitMap;
@ -72,14 +58,27 @@ class ParCompactionManager : public CHeapObj {
// ------------------------ End don't putback if not needed // ------------------------ End don't putback if not needed
private: private:
// 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
#define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
#undef OBJARRAY_QUEUE_SIZE
static ParCompactionManager** _manager_array; static ParCompactionManager** _manager_array;
static OopTaskQueueSet* _stack_array; static OopTaskQueueSet* _stack_array;
static ObjArrayTaskQueueSet* _objarray_queues;
static ObjectStartArray* _start_array; static ObjectStartArray* _start_array;
static RegionTaskQueueSet* _region_array; static RegionTaskQueueSet* _region_array;
static PSOldGen* _old_gen; static PSOldGen* _old_gen;
private:
OopTaskQueue _marking_stack; OopTaskQueue _marking_stack;
GrowableArray<oop>* _overflow_stack; GrowableArray<oop>* _overflow_stack;
typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
ObjArrayTaskQueue _objarray_queue;
ObjArrayOverflowStack* _objarray_overflow_stack;
// Is there a way to reuse the _marking_stack for the // Is there a way to reuse the _marking_stack for the
// saving empty regions? For now just create a different // saving empty regions? For now just create a different
// type of TaskQueue. // type of TaskQueue.
@ -128,8 +127,8 @@ class ParCompactionManager : public CHeapObj {
// Pushes onto the region stack. If the region stack is full, // Pushes onto the region stack. If the region stack is full,
// pushes onto the region overflow stack. // pushes onto the region overflow stack.
void region_stack_push(size_t region_index); void region_stack_push(size_t region_index);
public:
public:
Action action() { return _action; } Action action() { return _action; }
void set_action(Action v) { _action = v; } void set_action(Action v) { _action = v; }
@ -163,6 +162,8 @@ class ParCompactionManager : public CHeapObj {
// Get a oop for scanning. If returns null, no oop were found. // Get a oop for scanning. If returns null, no oop were found.
oop retrieve_for_scanning(); oop retrieve_for_scanning();
inline void push_objarray(oop obj, size_t index);
// Save region for later processing. Must not fail. // Save region for later processing. Must not fail.
void save_for_processing(size_t region_index); void save_for_processing(size_t region_index);
// Get a region for processing. If returns null, no region were found. // Get a region for processing. If returns null, no region were found.
@ -175,12 +176,17 @@ class ParCompactionManager : public CHeapObj {
return stack_array()->steal(queue_num, seed, t); return stack_array()->steal(queue_num, seed, t);
} }
static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
return _objarray_queues->steal(queue_num, seed, t);
}
static bool steal(int queue_num, int* seed, RegionTask& t) { static bool steal(int queue_num, int* seed, RegionTask& t) {
return region_array()->steal(queue_num, seed, t); return region_array()->steal(queue_num, seed, t);
} }
// Process tasks remaining on any stack // Process tasks remaining on any marking stack
void drain_marking_stacks(OopClosure *blk); void follow_marking_stacks();
inline bool marking_stacks_empty() const;
// Process tasks remaining on any stack // Process tasks remaining on any stack
void drain_region_stacks(); void drain_region_stacks();
@ -200,3 +206,8 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
"out of range manager_array access"); "out of range manager_array access");
return _manager_array[index]; return _manager_array[index];
} }
bool ParCompactionManager::marking_stacks_empty() const {
return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
_objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
}

View file

@ -0,0 +1,32 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
void ParCompactionManager::push_objarray(oop obj, size_t index)
{
ObjArrayTask task(obj, index);
assert(task.is_valid(), "bad ObjArrayTask");
if (!_objarray_queue.push(task)) {
_objarray_overflow_stack->push(task);
}
}

View file

@ -479,6 +479,7 @@ void PSMarkSweep::allocate_stacks() {
_preserved_oop_stack = NULL; _preserved_oop_stack = NULL;
_marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
_objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
int size = SystemDictionary::number_of_classes() * 2; int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true); _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@ -497,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
} }
delete _marking_stack; delete _marking_stack;
delete _objarray_stack;
delete _revisit_klass_stack; delete _revisit_klass_stack;
delete _revisit_mdo_stack; delete _revisit_mdo_stack;
} }

View file

@ -785,7 +785,7 @@ PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closu
void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); }
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); } void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); } void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(_compaction_manager, p); }
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); } void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
@ -2376,7 +2376,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Follow code cache roots. // Follow code cache roots.
CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure, CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
purged_class); purged_class);
follow_stack(cm); // Flush marking stack. cm->follow_marking_stacks(); // Flush marking stack.
// Update subklass/sibling/implementor links of live klasses // Update subklass/sibling/implementor links of live klasses
// revisit_klass_stack is used in follow_weak_klass_links(). // revisit_klass_stack is used in follow_weak_klass_links().
@ -2389,8 +2389,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
SymbolTable::unlink(is_alive_closure()); SymbolTable::unlink(is_alive_closure());
StringTable::unlink(is_alive_closure()); StringTable::unlink(is_alive_closure());
assert(cm->marking_stack()->size() == 0, "stack should be empty by now"); assert(cm->marking_stacks_empty(), "marking stacks should be empty");
assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
} }
// This should be moved to the shared markSweep code! // This should be moved to the shared markSweep code!
@ -2709,22 +2708,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
young_gen->move_and_update(cm); young_gen->move_and_update(cm);
} }
void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
while(!cm->overflow_stack()->is_empty()) {
oop obj = cm->overflow_stack()->pop();
obj->follow_contents(cm);
}
oop obj;
// obj is a reference!!!
while (cm->marking_stack()->pop_local(obj)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
obj->follow_contents(cm);
}
}
void void
PSParallelCompact::follow_weak_klass_links() { PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point. // All klasses on the revisit stack are marked at this point.
@ -2745,7 +2728,7 @@ PSParallelCompact::follow_weak_klass_links() {
&keep_alive_closure); &keep_alive_closure);
} }
// revisit_klass_stack is cleared in reset() // revisit_klass_stack is cleared in reset()
follow_stack(cm); cm->follow_marking_stacks();
} }
} }
@ -2776,7 +2759,7 @@ void PSParallelCompact::follow_mdo_weak_refs() {
rms->at(j)->follow_weak_refs(is_alive_closure()); rms->at(j)->follow_weak_refs(is_alive_closure());
} }
// revisit_mdo_stack is cleared in reset() // revisit_mdo_stack is cleared in reset()
follow_stack(cm); cm->follow_marking_stacks();
} }
} }

View file

@ -901,7 +901,6 @@ class PSParallelCompact : AllStatic {
// Mark live objects // Mark live objects
static void marking_phase(ParCompactionManager* cm, static void marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction); bool maximum_heap_compaction);
static void follow_stack(ParCompactionManager* cm);
static void follow_weak_klass_links(); static void follow_weak_klass_links();
static void follow_mdo_weak_refs(); static void follow_mdo_weak_refs();
@ -1276,7 +1275,7 @@ inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
} }
} }
} }
follow_stack(cm); cm->follow_marking_stacks();
} }
template <class T> template <class T>

View file

@ -25,8 +25,9 @@
#include "incls/_precompiled.incl" #include "incls/_precompiled.incl"
#include "incls/_markSweep.cpp.incl" #include "incls/_markSweep.cpp.incl"
GrowableArray<oop>* MarkSweep::_marking_stack = NULL; GrowableArray<oop>* MarkSweep::_marking_stack = NULL;
GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL; GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack = NULL;
GrowableArray<Klass*>* MarkSweep::_revisit_klass_stack = NULL;
GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL; GrowableArray<DataLayout*>* MarkSweep::_revisit_mdo_stack = NULL;
GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL; GrowableArray<oop>* MarkSweep::_preserved_oop_stack = NULL;
@ -104,11 +105,19 @@ void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); } void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
void MarkSweep::follow_stack() { void MarkSweep::follow_stack() {
while (!_marking_stack->is_empty()) { do {
oop obj = _marking_stack->pop(); while (!_marking_stack->is_empty()) {
assert (obj->is_gc_marked(), "p must be marked"); oop obj = _marking_stack->pop();
obj->follow_contents(); assert (obj->is_gc_marked(), "p must be marked");
} obj->follow_contents();
}
// Process ObjArrays one at a time to avoid marking stack bloat.
if (!_objarray_stack->is_empty()) {
ObjArrayTask task = _objarray_stack->pop();
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(task.obj(), task.index());
}
} while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
} }
MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure; MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;

View file

@ -110,8 +110,9 @@ class MarkSweep : AllStatic {
// Vars // Vars
// //
protected: protected:
// Traversal stack used during phase1 // Traversal stacks used during phase1
static GrowableArray<oop>* _marking_stack; static GrowableArray<oop>* _marking_stack;
static GrowableArray<ObjArrayTask>* _objarray_stack;
// Stack for live klasses to revisit at end of marking phase // Stack for live klasses to revisit at end of marking phase
static GrowableArray<Klass*>* _revisit_klass_stack; static GrowableArray<Klass*>* _revisit_klass_stack;
// Set (stack) of MDO's to revisit at end of marking phase // Set (stack) of MDO's to revisit at end of marking phase
@ -188,6 +189,7 @@ class MarkSweep : AllStatic {
template <class T> static inline void mark_and_follow(T* p); template <class T> static inline void mark_and_follow(T* p);
// Check mark and maybe push on marking stack // Check mark and maybe push on marking stack
template <class T> static inline void mark_and_push(T* p); template <class T> static inline void mark_and_push(T* p);
static inline void push_objarray(oop obj, size_t index);
static void follow_stack(); // Empty marking stack. static void follow_stack(); // Empty marking stack.

View file

@ -77,6 +77,12 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
} }
} }
void MarkSweep::push_objarray(oop obj, size_t index) {
ObjArrayTask task(obj, index);
assert(task.is_valid(), "bad ObjArrayTask");
_objarray_stack->push(task);
}
template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) { template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
T heap_oop = oopDesc::load_heap_oop(p); T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) { if (!oopDesc::is_null(heap_oop)) {

View file

@ -1,5 +1,5 @@
// //
// Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. // Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
@ -246,6 +246,7 @@ c1_LIRGenerator.cpp c1_LIRAssembler.hpp
c1_LIRGenerator.cpp c1_LIRGenerator.hpp c1_LIRGenerator.cpp c1_LIRGenerator.hpp
c1_LIRGenerator.cpp c1_ValueStack.hpp c1_LIRGenerator.cpp c1_ValueStack.hpp
c1_LIRGenerator.cpp ciArrayKlass.hpp c1_LIRGenerator.cpp ciArrayKlass.hpp
c1_LIRGenerator.cpp ciCPCache.hpp
c1_LIRGenerator.cpp ciInstance.hpp c1_LIRGenerator.cpp ciInstance.hpp
c1_LIRGenerator.cpp heapRegion.hpp c1_LIRGenerator.cpp heapRegion.hpp
c1_LIRGenerator.cpp sharedRuntime.hpp c1_LIRGenerator.cpp sharedRuntime.hpp

View file

@ -541,6 +541,7 @@ ciConstantPoolCache.hpp resourceArea.hpp
ciCPCache.cpp cpCacheOop.hpp ciCPCache.cpp cpCacheOop.hpp
ciCPCache.cpp ciCPCache.hpp ciCPCache.cpp ciCPCache.hpp
ciCPCache.cpp ciUtilities.hpp
ciCPCache.hpp ciClassList.hpp ciCPCache.hpp ciClassList.hpp
ciCPCache.hpp ciObject.hpp ciCPCache.hpp ciObject.hpp
@ -2016,6 +2017,7 @@ init.cpp handles.inline.hpp
init.cpp icBuffer.hpp init.cpp icBuffer.hpp
init.cpp icache.hpp init.cpp icache.hpp
init.cpp init.hpp init.cpp init.hpp
init.cpp methodHandles.hpp
init.cpp safepoint.hpp init.cpp safepoint.hpp
init.cpp sharedRuntime.hpp init.cpp sharedRuntime.hpp
init.cpp universe.hpp init.cpp universe.hpp
@ -2726,8 +2728,10 @@ markOop.inline.hpp markOop.hpp
markSweep.cpp compileBroker.hpp markSweep.cpp compileBroker.hpp
markSweep.cpp methodDataOop.hpp markSweep.cpp methodDataOop.hpp
markSweep.cpp objArrayKlass.inline.hpp
markSweep.hpp collectedHeap.hpp markSweep.hpp collectedHeap.hpp
markSweep.hpp taskqueue.hpp
memRegion.cpp globals.hpp memRegion.cpp globals.hpp
memRegion.cpp memRegion.hpp memRegion.cpp memRegion.hpp
@ -2872,6 +2876,7 @@ methodHandles.cpp methodHandles.hpp
methodHandles.cpp oopFactory.hpp methodHandles.cpp oopFactory.hpp
methodHandles.cpp reflection.hpp methodHandles.cpp reflection.hpp
methodHandles.cpp signature.hpp methodHandles.cpp signature.hpp
methodHandles.cpp stubRoutines.hpp
methodHandles.cpp symbolTable.hpp methodHandles.cpp symbolTable.hpp
methodHandles_<arch>.cpp allocation.inline.hpp methodHandles_<arch>.cpp allocation.inline.hpp
@ -3056,8 +3061,10 @@ objArrayKlass.cpp copy.hpp
objArrayKlass.cpp genOopClosures.inline.hpp objArrayKlass.cpp genOopClosures.inline.hpp
objArrayKlass.cpp handles.inline.hpp objArrayKlass.cpp handles.inline.hpp
objArrayKlass.cpp instanceKlass.hpp objArrayKlass.cpp instanceKlass.hpp
objArrayKlass.cpp markSweep.inline.hpp
objArrayKlass.cpp mutexLocker.hpp objArrayKlass.cpp mutexLocker.hpp
objArrayKlass.cpp objArrayKlass.hpp objArrayKlass.cpp objArrayKlass.hpp
objArrayKlass.cpp objArrayKlass.inline.hpp
objArrayKlass.cpp objArrayKlassKlass.hpp objArrayKlass.cpp objArrayKlassKlass.hpp
objArrayKlass.cpp objArrayOop.hpp objArrayKlass.cpp objArrayOop.hpp
objArrayKlass.cpp oop.inline.hpp objArrayKlass.cpp oop.inline.hpp
@ -3068,11 +3075,12 @@ objArrayKlass.cpp systemDictionary.hpp
objArrayKlass.cpp universe.inline.hpp objArrayKlass.cpp universe.inline.hpp
objArrayKlass.cpp vmSymbols.hpp objArrayKlass.cpp vmSymbols.hpp
objArrayKlass.hpp arrayKlass.hpp objArrayKlass.hpp arrayKlass.hpp
objArrayKlass.hpp instanceKlass.hpp objArrayKlass.hpp instanceKlass.hpp
objArrayKlass.hpp specialized_oop_closures.hpp objArrayKlass.hpp specialized_oop_closures.hpp
objArrayKlass.inline.hpp objArrayKlass.hpp
objArrayKlassKlass.cpp collectedHeap.inline.hpp objArrayKlassKlass.cpp collectedHeap.inline.hpp
objArrayKlassKlass.cpp instanceKlass.hpp objArrayKlassKlass.cpp instanceKlass.hpp
objArrayKlassKlass.cpp javaClasses.hpp objArrayKlassKlass.cpp javaClasses.hpp
@ -4098,6 +4106,7 @@ task.cpp timer.hpp
task.hpp top.hpp task.hpp top.hpp
taskqueue.cpp debug.hpp taskqueue.cpp debug.hpp
taskqueue.cpp oop.inline.hpp
taskqueue.cpp os.hpp taskqueue.cpp os.hpp
taskqueue.cpp taskqueue.hpp taskqueue.cpp taskqueue.hpp
taskqueue.cpp thread_<os_family>.inline.hpp taskqueue.cpp thread_<os_family>.inline.hpp

View file

@ -115,10 +115,14 @@ objArrayKlass.cpp heapRegionSeq.inline.hpp
objArrayKlass.cpp g1CollectedHeap.inline.hpp objArrayKlass.cpp g1CollectedHeap.inline.hpp
objArrayKlass.cpp g1OopClosures.inline.hpp objArrayKlass.cpp g1OopClosures.inline.hpp
objArrayKlass.cpp oop.pcgc.inline.hpp objArrayKlass.cpp oop.pcgc.inline.hpp
objArrayKlass.cpp psCompactionManager.hpp
objArrayKlass.cpp psPromotionManager.inline.hpp objArrayKlass.cpp psPromotionManager.inline.hpp
objArrayKlass.cpp psScavenge.inline.hpp objArrayKlass.cpp psScavenge.inline.hpp
objArrayKlass.cpp parOopClosures.inline.hpp objArrayKlass.cpp parOopClosures.inline.hpp
objArrayKlass.inline.hpp psCompactionManager.inline.hpp
objArrayKlass.inline.hpp psParallelCompact.hpp
oop.pcgc.inline.hpp parNewGeneration.hpp oop.pcgc.inline.hpp parNewGeneration.hpp
oop.pcgc.inline.hpp parallelScavengeHeap.hpp oop.pcgc.inline.hpp parallelScavengeHeap.hpp
oop.pcgc.inline.hpp psCompactionManager.hpp oop.pcgc.inline.hpp psCompactionManager.hpp

View file

@ -159,6 +159,7 @@ void GenMarkSweep::allocate_stacks() {
_preserved_oop_stack = NULL; _preserved_oop_stack = NULL;
_marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
_objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
int size = SystemDictionary::number_of_classes() * 2; int size = SystemDictionary::number_of_classes() * 2;
_revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true); _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@ -194,7 +195,6 @@ void GenMarkSweep::allocate_stacks() {
void GenMarkSweep::deallocate_stacks() { void GenMarkSweep::deallocate_stacks() {
if (!UseG1GC) { if (!UseG1GC) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->release_scratch(); gch->release_scratch();
@ -208,6 +208,7 @@ void GenMarkSweep::deallocate_stacks() {
} }
delete _marking_stack; delete _marking_stack;
delete _objarray_stack;
delete _revisit_klass_stack; delete _revisit_klass_stack;
delete _revisit_mdo_stack; delete _revisit_mdo_stack;

View file

@ -28,10 +28,10 @@ class CardTableRS;
class CardTableModRefBS; class CardTableModRefBS;
class DefNewGeneration; class DefNewGeneration;
template<class E> class GenericTaskQueue; template<class E, unsigned int N> class GenericTaskQueue;
typedef GenericTaskQueue<oop> OopTaskQueue; typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue;
template<class E> class GenericTaskQueueSet; template<class T> class GenericTaskQueueSet;
typedef GenericTaskQueueSet<oop> OopTaskQueueSet; typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
// Closure for iterating roots from a particular generation // Closure for iterating roots from a particular generation
// Note: all classes deriving from this MUST call this do_barrier // Note: all classes deriving from this MUST call this do_barrier

View file

@ -314,24 +314,24 @@ void objArrayKlass::initialize(TRAPS) {
void objArrayKlass::oop_follow_contents(oop obj) { void objArrayKlass::oop_follow_contents(oop obj) {
assert (obj->is_array(), "obj must be array"); assert (obj->is_array(), "obj must be array");
objArrayOop a = objArrayOop(obj); objArrayOop(obj)->follow_header();
a->follow_header(); if (UseCompressedOops) {
ObjArrayKlass_OOP_ITERATE( \ objarray_follow_contents<narrowOop>(obj, 0);
a, p, \ } else {
/* we call mark_and_follow here to avoid excessive marking stack usage */ \ objarray_follow_contents<oop>(obj, 0);
MarkSweep::mark_and_follow(p)) }
} }
#ifndef SERIALGC #ifndef SERIALGC
void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, void objArrayKlass::oop_follow_contents(ParCompactionManager* cm,
oop obj) { oop obj) {
assert (obj->is_array(), "obj must be array"); assert(obj->is_array(), "obj must be array");
objArrayOop a = objArrayOop(obj); objArrayOop(obj)->follow_header(cm);
a->follow_header(cm); if (UseCompressedOops) {
ObjArrayKlass_OOP_ITERATE( \ objarray_follow_contents<narrowOop>(cm, obj, 0);
a, p, \ } else {
/* we call mark_and_follow here to avoid excessive marking stack usage */ \ objarray_follow_contents<oop>(cm, obj, 0);
PSParallelCompact::mark_and_follow(cm, p)) }
} }
#endif // SERIALGC #endif // SERIALGC

View file

@ -91,10 +91,18 @@ class objArrayKlass : public arrayKlass {
// Garbage collection // Garbage collection
void oop_follow_contents(oop obj); void oop_follow_contents(oop obj);
inline void oop_follow_contents(oop obj, int index);
template <class T> inline void objarray_follow_contents(oop obj, int index);
int oop_adjust_pointers(oop obj); int oop_adjust_pointers(oop obj);
// Parallel Scavenge and Parallel Old // Parallel Scavenge and Parallel Old
PARALLEL_GC_DECLS PARALLEL_GC_DECLS
#ifndef SERIALGC
inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
template <class T> inline void
objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
#endif // !SERIALGC
// Iterators // Iterators
int oop_oop_iterate(oop obj, OopClosure* blk) { int oop_oop_iterate(oop obj, OopClosure* blk) {
@ -131,5 +139,4 @@ class objArrayKlass : public arrayKlass {
void oop_verify_on(oop obj, outputStream* st); void oop_verify_on(oop obj, outputStream* st);
void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty); void oop_verify_old_oop(oop obj, oop* p, bool allow_dirty);
void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty); void oop_verify_old_oop(oop obj, narrowOop* p, bool allow_dirty);
}; };

View file

@ -0,0 +1,89 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
void objArrayKlass::oop_follow_contents(oop obj, int index) {
if (UseCompressedOops) {
objarray_follow_contents<narrowOop>(obj, index);
} else {
objarray_follow_contents<oop>(obj, index);
}
}
template <class T>
void objArrayKlass::objarray_follow_contents(oop obj, int index) {
objArrayOop a = objArrayOop(obj);
const size_t len = size_t(a->length());
const size_t beg_index = size_t(index);
assert(beg_index < len || len == 0, "index too large");
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
const size_t end_index = beg_index + stride;
T* const base = (T*)a->base();
T* const beg = base + beg_index;
T* const end = base + end_index;
// Push the non-NULL elements of the next stride on the marking stack.
for (T* e = beg; e < end; e++) {
MarkSweep::mark_and_push<T>(e);
}
if (end_index < len) {
MarkSweep::push_objarray(a, end_index); // Push the continuation.
}
}
#ifndef SERIALGC
void objArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
int index) {
if (UseCompressedOops) {
objarray_follow_contents<narrowOop>(cm, obj, index);
} else {
objarray_follow_contents<oop>(cm, obj, index);
}
}
template <class T>
void objArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
int index) {
objArrayOop a = objArrayOop(obj);
const size_t len = size_t(a->length());
const size_t beg_index = size_t(index);
assert(beg_index < len || len == 0, "index too large");
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
const size_t end_index = beg_index + stride;
T* const base = (T*)a->base();
T* const beg = base + beg_index;
T* const end = base + end_index;
// Push the non-NULL elements of the next stride on the marking stack.
for (T* e = beg; e < end; e++) {
PSParallelCompact::mark_and_push<T>(cm, e);
}
if (end_index < len) {
cm->push_objarray(a, end_index); // Push the continuation.
}
}
#endif // #ifndef SERIALGC

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -2088,29 +2088,41 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari
BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
int scale, Node* offset, int scale, Node* offset,
Node* init, Node* limit, Node* stride, Node* init, Node* limit, Node* stride,
Node* range) { Node* range, bool upper) {
DEBUG_ONLY(ttyLocker ttyl);
if (TraceLoopPredicate) tty->print("rc_predicate ");
Node* max_idx_expr = init; Node* max_idx_expr = init;
int stride_con = stride->get_int(); int stride_con = stride->get_int();
if ((stride_con > 0) == (scale > 0)) { if ((stride_con > 0) == (scale > 0) == upper) {
max_idx_expr = new (C, 3) SubINode(limit, stride); max_idx_expr = new (C, 3) SubINode(limit, stride);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) tty->print("(limit - stride) ");
} else {
if (TraceLoopPredicate) tty->print("init ");
} }
if (scale != 1) { if (scale != 1) {
ConNode* con_scale = _igvn.intcon(scale); ConNode* con_scale = _igvn.intcon(scale);
max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate) tty->print("* %d ", scale);
} }
if (offset && (!offset->is_Con() || offset->get_int() != 0)){ if (offset && (!offset->is_Con() || offset->get_int() != 0)){
max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
register_new_node(max_idx_expr, ctrl); register_new_node(max_idx_expr, ctrl);
if (TraceLoopPredicate)
if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
else tty->print("+ offset ");
} }
CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
register_new_node(cmp, ctrl); register_new_node(cmp, ctrl);
BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
register_new_node(bol, ctrl); register_new_node(bol, ctrl);
if (TraceLoopPredicate) tty->print_cr("<u range");
return bol; return bol;
} }
@ -2187,7 +2199,6 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
while (if_proj_list.size() > 0) { while (if_proj_list.size() > 0) {
// Following are changed to nonnull when a predicate can be hoisted // Following are changed to nonnull when a predicate can be hoisted
ProjNode* new_predicate_proj = NULL; ProjNode* new_predicate_proj = NULL;
BoolNode* new_predicate_bol = NULL;
ProjNode* proj = if_proj_list.pop()->as_Proj(); ProjNode* proj = if_proj_list.pop()->as_Proj();
IfNode* iff = proj->in(0)->as_If(); IfNode* iff = proj->in(0)->as_If();
@ -2218,93 +2229,120 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
// Invariant test // Invariant test
new_predicate_proj = create_new_if_for_predicate(predicate_proj); new_predicate_proj = create_new_if_for_predicate(predicate_proj);
Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
new_predicate_bol = invar.clone(bol, ctrl)->as_Bool(); BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
if (TraceLoopPredicate) tty->print("invariant");
// Negate test if necessary
bool negated = false;
if (proj->_con != predicate_proj->_con) {
new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
register_new_node(new_predicate_bol, ctrl);
negated = true;
}
IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
_igvn.hash_delete(new_predicate_iff);
new_predicate_iff->set_req(1, new_predicate_bol);
if (TraceLoopPredicate) tty->print_cr("invariant if%s: %d", negated ? " negated" : "", new_predicate_iff->_idx);
} else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
// Range check (only for counted loops) assert(proj->_con == predicate_proj->_con, "must match");
new_predicate_proj = create_new_if_for_predicate(predicate_proj);
Node *ctrl = new_predicate_proj->in(0)->as_If()->in(0); // Range check for counted loops
const Node* cmp = bol->in(1)->as_Cmp(); const Node* cmp = bol->in(1)->as_Cmp();
Node* idx = cmp->in(1); Node* idx = cmp->in(1);
assert(!invar.is_invariant(idx), "index is variant"); assert(!invar.is_invariant(idx), "index is variant");
assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be"); assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be");
LoadRangeNode* ld_rng = (LoadRangeNode*)cmp->in(2); // LoadRangeNode Node* ld_rng = cmp->in(2); // LoadRangeNode
assert(invar.is_invariant(ld_rng), "load range must be invariant"); assert(invar.is_invariant(ld_rng), "load range must be invariant");
ld_rng = (LoadRangeNode*)invar.clone(ld_rng, ctrl);
int scale = 1; int scale = 1;
Node* offset = zero; Node* offset = zero;
bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset); bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
assert(ok, "must be index expression"); assert(ok, "must be index expression");
Node* init = cl->init_trip();
Node* limit = cl->limit();
Node* stride = cl->stride();
// Build if's for the upper and lower bound tests. The
// lower_bound test will dominate the upper bound test and all
// cloned or created nodes will use the lower bound test as
// their declared control.
ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj);
ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj);
assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
// Perform cloning to keep Invariance state correct since the
// late schedule will place invariant things in the loop.
ld_rng = invar.clone(ld_rng, ctrl);
if (offset && offset != zero) { if (offset && offset != zero) {
assert(invar.is_invariant(offset), "offset must be loop invariant"); assert(invar.is_invariant(offset), "offset must be loop invariant");
offset = invar.clone(offset, ctrl); offset = invar.clone(offset, ctrl);
} }
Node* init = cl->init_trip();
Node* limit = cl->limit();
Node* stride = cl->stride();
new_predicate_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng);
if (TraceLoopPredicate) tty->print("range check");
}
if (new_predicate_proj == NULL) { // Test the lower bound
Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, false);
IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
_igvn.hash_delete(lower_bound_iff);
lower_bound_iff->set_req(1, lower_bound_bol);
if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
// Test the upper bound
Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng, true);
IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
_igvn.hash_delete(upper_bound_iff);
upper_bound_iff->set_req(1, upper_bound_bol);
if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx);
// Fall through into rest of the clean up code which will move
// any dependent nodes onto the upper bound test.
new_predicate_proj = upper_bound_proj;
} else {
// The other proj of the "iff" is a uncommon trap projection, and we can assume // The other proj of the "iff" is a uncommon trap projection, and we can assume
// the other proj will not be executed ("executed" means uct raised). // the other proj will not be executed ("executed" means uct raised).
continue; continue;
} else {
// Success - attach condition (new_predicate_bol) to predicate if
invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
IfNode* new_iff = new_predicate_proj->in(0)->as_If();
// Negate test if necessary
if (proj->_con != predicate_proj->_con) {
new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
register_new_node(new_predicate_bol, new_iff->in(0));
if (TraceLoopPredicate) tty->print_cr(" if negated: %d", iff->_idx);
} else {
if (TraceLoopPredicate) tty->print_cr(" if: %d", iff->_idx);
}
_igvn.hash_delete(new_iff);
new_iff->set_req(1, new_predicate_bol);
_igvn.hash_delete(iff);
iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
Node* ctrl = new_predicate_proj; // new control
ProjNode* dp = proj; // old control
assert(get_loop(dp) == loop, "guarenteed at the time of collecting proj");
// Find nodes (depends only on the test) off the surviving projection;
// move them outside the loop with the control of proj_clone
for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
Node* cd = dp->fast_out(i); // Control-dependent node
if (cd->depends_only_on_test()) {
assert(cd->in(0) == dp, "");
_igvn.hash_delete(cd);
cd->set_req(0, ctrl); // ctrl, not NULL
set_early_ctrl(cd);
_igvn._worklist.push(cd);
IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
if (new_loop != loop) {
if (!loop->_child) loop->_body.yank(cd);
if (!new_loop->_child ) new_loop->_body.push(cd);
}
--i;
--imax;
}
}
hoisted = true;
C->set_major_progress();
} }
// Success - attach condition (new_predicate_bol) to predicate if
invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate
// Eliminate the old if in the loop body
_igvn.hash_delete(iff);
iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
Node* ctrl = new_predicate_proj; // new control
ProjNode* dp = proj; // old control
assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
// Find nodes (depends only on the test) off the surviving projection;
// move them outside the loop with the control of proj_clone
for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
Node* cd = dp->fast_out(i); // Control-dependent node
if (cd->depends_only_on_test()) {
assert(cd->in(0) == dp, "");
_igvn.hash_delete(cd);
cd->set_req(0, ctrl); // ctrl, not NULL
set_early_ctrl(cd);
_igvn._worklist.push(cd);
IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
if (new_loop != loop) {
if (!loop->_child) loop->_body.yank(cd);
if (!new_loop->_child ) new_loop->_body.push(cd);
}
--i;
--imax;
}
}
hoisted = true;
C->set_major_progress();
} // end while } // end while
#ifndef PRODUCT #ifndef PRODUCT
// report that the loop predication has been actually performed // report that the loop predication has been actually performed
// for this loop // for this loop
if (TraceLoopPredicate && hoisted) { if (TraceLoopPredicate && hoisted) {
tty->print("Loop Predication Performed:"); tty->print("Loop Predication Performed:");
loop->dump_head(); loop->dump_head();
} }
#endif #endif
return hoisted; return hoisted;

View file

@ -821,7 +821,7 @@ public:
BoolNode* rc_predicate(Node* ctrl, BoolNode* rc_predicate(Node* ctrl,
int scale, Node* offset, int scale, Node* offset,
Node* init, Node* limit, Node* stride, Node* init, Node* limit, Node* stride,
Node* range); Node* range, bool upper);
// Implementation of the loop predication to promote checks outside the loop // Implementation of the loop predication to promote checks outside the loop
bool loop_predication_impl(IdealLoopTree *loop); bool loop_predication_impl(IdealLoopTree *loop);

View file

@ -864,7 +864,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
thread->set_exception_handler_pc(handler_address); thread->set_exception_handler_pc(handler_address);
thread->set_exception_stack_size(0); thread->set_exception_stack_size(0);
// Check if the exception PC is a MethodHandle call. // Check if the exception PC is a MethodHandle call site.
thread->set_is_method_handle_exception(nm->is_method_handle_return(pc)); thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
} }
@ -952,7 +952,7 @@ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address r
thread->set_vm_result(exception); thread->set_vm_result(exception);
// Frame not compiled (handles deoptimization blob) // Frame not compiled (handles deoptimization blob)
return SharedRuntime::raw_exception_handler_for_return_address(ret_pc); return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -82,6 +82,10 @@ const char* MethodHandles::_entry_names[_EK_LIMIT+1] = {
NULL NULL
}; };
// Adapters.
MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
int MethodHandles::_adapter_code_size = StubRoutines::method_handles_adapters_code_size;
jobject MethodHandles::_raise_exception_method; jobject MethodHandles::_raise_exception_method;
#ifdef ASSERT #ifdef ASSERT
@ -95,6 +99,41 @@ bool MethodHandles::spot_check_entry_names() {
} }
#endif #endif
//------------------------------------------------------------------------------
// MethodHandles::generate_adapters
//
void MethodHandles::generate_adapters() {
if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL) return;
assert(_adapter_code == NULL, "generate only once");
ResourceMark rm;
TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
_adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size);
if (_adapter_code == NULL)
vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
CodeBuffer code(_adapter_code->instructions_begin(), _adapter_code->instructions_size());
MethodHandlesAdapterGenerator g(&code);
g.generate();
}
//------------------------------------------------------------------------------
// MethodHandlesAdapterGenerator::generate
//
void MethodHandlesAdapterGenerator::generate() {
// Generate generic method handle adapters.
for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
ek < MethodHandles::_EK_LIMIT;
ek = MethodHandles::EntryKind(1 + (int)ek)) {
StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
void MethodHandles::set_enabled(bool z) { void MethodHandles::set_enabled(bool z) {
if (_enabled != z) { if (_enabled != z) {
guarantee(z && EnableMethodHandles, "can only enable once, and only if -XX:+EnableMethodHandles"); guarantee(z && EnableMethodHandles, "can only enable once, and only if -XX:+EnableMethodHandles");

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 2008-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -115,6 +115,10 @@ class MethodHandles: AllStatic {
static const char* _entry_names[_EK_LIMIT+1]; static const char* _entry_names[_EK_LIMIT+1];
static jobject _raise_exception_method; static jobject _raise_exception_method;
// Adapters.
static MethodHandlesAdapterBlob* _adapter_code;
static int _adapter_code_size;
static bool ek_valid(EntryKind ek) { return (uint)ek < (uint)_EK_LIMIT; } static bool ek_valid(EntryKind ek) { return (uint)ek < (uint)_EK_LIMIT; }
static bool conv_op_valid(int op) { return (uint)op < (uint)CONV_OP_LIMIT; } static bool conv_op_valid(int op) { return (uint)op < (uint)CONV_OP_LIMIT; }
@ -133,6 +137,43 @@ class MethodHandles: AllStatic {
_entries[ek] = me; _entries[ek] = me;
} }
// Some adapter helper functions.
static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
switch (ek) {
case _bound_int_mh : // fall-thru
case _bound_int_direct_mh : arg_type = T_INT; arg_mask = _INSERT_INT_MASK; break;
case _bound_long_mh : // fall-thru
case _bound_long_direct_mh: arg_type = T_LONG; arg_mask = _INSERT_LONG_MASK; break;
case _bound_ref_mh : // fall-thru
case _bound_ref_direct_mh : arg_type = T_OBJECT; arg_mask = _INSERT_REF_MASK; break;
default: ShouldNotReachHere();
}
arg_slots = type2size[arg_type];
}
static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
int swap_slots = 0;
switch (ek) {
case _adapter_opt_swap_1: swap_slots = 1; rotate = 0; break;
case _adapter_opt_swap_2: swap_slots = 2; rotate = 0; break;
case _adapter_opt_rot_1_up: swap_slots = 1; rotate = 1; break;
case _adapter_opt_rot_1_down: swap_slots = 1; rotate = -1; break;
case _adapter_opt_rot_2_up: swap_slots = 2; rotate = 1; break;
case _adapter_opt_rot_2_down: swap_slots = 2; rotate = -1; break;
default: ShouldNotReachHere();
}
// Return the size of the stack slots to move in bytes.
swap_bytes = swap_slots * Interpreter::stackElementSize();
}
static int get_ek_adapter_opt_spread_info(EntryKind ek) {
switch (ek) {
case _adapter_opt_spread_0: return 0;
case _adapter_opt_spread_1: return 1;
default : return -1;
}
}
static methodOop raise_exception_method() { static methodOop raise_exception_method() {
oop rem = JNIHandles::resolve(_raise_exception_method); oop rem = JNIHandles::resolve(_raise_exception_method);
assert(rem == NULL || rem->is_method(), ""); assert(rem == NULL || rem->is_method(), "");
@ -230,7 +271,10 @@ class MethodHandles: AllStatic {
// bit values for suppress argument to expand_MemberName: // bit values for suppress argument to expand_MemberName:
enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 }; enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
// called from InterpreterGenerator and StubGenerator // Generate MethodHandles adapters.
static void generate_adapters();
// Called from InterpreterGenerator and MethodHandlesAdapterGenerator.
static address generate_method_handle_interpreter_entry(MacroAssembler* _masm); static address generate_method_handle_interpreter_entry(MacroAssembler* _masm);
static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek); static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek);
@ -385,13 +429,13 @@ class MethodHandles: AllStatic {
static void insert_arg_slots(MacroAssembler* _masm, static void insert_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots, RegisterOrConstant arg_slots,
int arg_mask, int arg_mask,
Register rax_argslot, Register argslot_reg,
Register rbx_temp, Register rdx_temp); Register temp_reg, Register temp2_reg);
static void remove_arg_slots(MacroAssembler* _masm, static void remove_arg_slots(MacroAssembler* _masm,
RegisterOrConstant arg_slots, RegisterOrConstant arg_slots,
Register rax_argslot, Register argslot_reg,
Register rbx_temp, Register rdx_temp); Register temp_reg, Register temp2_reg);
}; };
@ -447,3 +491,14 @@ class MethodHandleEntry {
address MethodHandles::from_compiled_entry(EntryKind ek) { return entry(ek)->from_compiled_entry(); } address MethodHandles::from_compiled_entry(EntryKind ek) { return entry(ek)->from_compiled_entry(); }
address MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); } address MethodHandles::from_interpreted_entry(EntryKind ek) { return entry(ek)->from_interpreted_entry(); }
//------------------------------------------------------------------------------
// MethodHandlesAdapterGenerator
//
class MethodHandlesAdapterGenerator : public StubCodeGenerator {
public:
MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}
void generate();
};

View file

@ -1346,9 +1346,7 @@ void Arguments::set_g1_gc_flags() {
} }
if (FLAG_IS_DEFAULT(MarkStackSize)) { if (FLAG_IS_DEFAULT(MarkStackSize)) {
// Size as a multiple of TaskQueueSuper::N which is larger FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
// for 64-bit.
FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
} }
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk", tty->print_cr("MarkStackSize: %uk MarkStackSizeMax: %uk",
@ -2859,6 +2857,12 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
} }
#endif // _LP64 #endif // _LP64
// MethodHandles code does not support TaggedStackInterpreter.
if (EnableMethodHandles && TaggedStackInterpreter) {
warning("TaggedStackInterpreter is not supported by MethodHandles code. Disabling TaggedStackInterpreter.");
TaggedStackInterpreter = false;
}
// Check the GC selections again. // Check the GC selections again.
if (!check_gc_consistency()) { if (!check_gc_consistency()) {
return JNI_EINVAL; return JNI_EINVAL;

View file

@ -1795,6 +1795,10 @@ class CommandLineFlags {
product(uintx, PreserveMarkStackSize, 1024, \ product(uintx, PreserveMarkStackSize, 1024, \
"Size for stack used in promotion failure handling") \ "Size for stack used in promotion failure handling") \
\ \
develop(uintx, ObjArrayMarkingStride, 512, \
"Number of ObjArray elements to push onto the marking stack" \
"before pushing a continuation entry") \
\
product_pd(bool, UseTLAB, "Use thread-local object allocation") \ product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\ \
product_pd(bool, ResizeTLAB, \ product_pd(bool, ResizeTLAB, \

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -118,6 +118,9 @@ jint init_globals() {
javaClasses_init(); // must happen after vtable initialization javaClasses_init(); // must happen after vtable initialization
stubRoutines_init2(); // note: StubRoutines need 2-phase init stubRoutines_init2(); // note: StubRoutines need 2-phase init
// Generate MethodHandles adapters.
MethodHandles::generate_adapters();
// Although we'd like to, we can't easily do a heap verify // Although we'd like to, we can't easily do a heap verify
// here because the main thread isn't yet a JavaThread, so // here because the main thread isn't yet a JavaThread, so
// its TLAB may not be made parseable from the usual interfaces. // its TLAB may not be made parseable from the usual interfaces.

View file

@ -256,7 +256,7 @@ JRT_END
// The continuation address is the entry point of the exception handler of the // The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address. // previous frame depending on the return address.
address SharedRuntime::raw_exception_handler_for_return_address(address return_address) { address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
assert(frame::verify_return_pc(return_address), "must be a return pc"); assert(frame::verify_return_pc(return_address), "must be a return pc");
// the fastest case first // the fastest case first
@ -264,6 +264,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(address return_a
if (blob != NULL && blob->is_nmethod()) { if (blob != NULL && blob->is_nmethod()) {
nmethod* code = (nmethod*)blob; nmethod* code = (nmethod*)blob;
assert(code != NULL, "nmethod must be present"); assert(code != NULL, "nmethod must be present");
// Check if the return address is a MethodHandle call site.
thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
// native nmethods don't have exception handlers // native nmethods don't have exception handlers
assert(!code->is_native_method(), "no exception handler"); assert(!code->is_native_method(), "no exception handler");
assert(code->header_begin() != code->exception_begin(), "no exception handler"); assert(code->header_begin() != code->exception_begin(), "no exception handler");
@ -289,6 +291,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(address return_a
if (blob->is_nmethod()) { if (blob->is_nmethod()) {
nmethod* code = (nmethod*)blob; nmethod* code = (nmethod*)blob;
assert(code != NULL, "nmethod must be present"); assert(code != NULL, "nmethod must be present");
// Check if the return address is a MethodHandle call site.
thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
assert(code->header_begin() != code->exception_begin(), "no exception handler"); assert(code->header_begin() != code->exception_begin(), "no exception handler");
return code->exception_begin(); return code->exception_begin();
} }
@ -309,10 +313,11 @@ address SharedRuntime::raw_exception_handler_for_return_address(address return_a
} }
JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address)) JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
return raw_exception_handler_for_return_address(return_address); return raw_exception_handler_for_return_address(thread, return_address);
JRT_END JRT_END
address SharedRuntime::get_poll_stub(address pc) { address SharedRuntime::get_poll_stub(address pc) {
address stub; address stub;
// Look up the code blob // Look up the code blob
@ -465,16 +470,6 @@ address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc,
t = table.entry_for(catch_pco, -1, 0); t = table.entry_for(catch_pco, -1, 0);
} }
#ifdef COMPILER1
if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
// Exception is not handled by this frame so unwind. Note that
// this is not the same as how C2 does this. C2 emits a table
// entry that dispatches to the unwind code in the nmethod.
return NULL;
}
#endif /* COMPILER1 */
if (t == NULL) { if (t == NULL) {
tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci); tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
tty->print_cr(" Exception:"); tty->print_cr(" Exception:");
@ -587,7 +582,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
// 3. Implict null exception in nmethod // 3. Implict null exception in nmethod
if (!cb->is_nmethod()) { if (!cb->is_nmethod()) {
guarantee(cb->is_adapter_blob(), guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
"exception happened outside interpreter, nmethods and vtable stubs (1)"); "exception happened outside interpreter, nmethods and vtable stubs (1)");
// There is no handler here, so we will simply unwind. // There is no handler here, so we will simply unwind.
return StubRoutines::throw_NullPointerException_at_call_entry(); return StubRoutines::throw_NullPointerException_at_call_entry();
@ -892,12 +887,13 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
RegisterMap cbl_map(thread, false); RegisterMap cbl_map(thread, false);
frame caller_frame = thread->last_frame().sender(&cbl_map); frame caller_frame = thread->last_frame().sender(&cbl_map);
CodeBlob* cb = caller_frame.cb(); CodeBlob* caller_cb = caller_frame.cb();
guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod"); guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
nmethod* caller_nm = caller_cb->as_nmethod_or_null();
// make sure caller is not getting deoptimized // make sure caller is not getting deoptimized
// and removed before we are done with it. // and removed before we are done with it.
// CLEANUP - with lazy deopt shouldn't need this lock // CLEANUP - with lazy deopt shouldn't need this lock
nmethodLocker caller_lock((nmethod*)cb); nmethodLocker caller_lock(caller_nm);
// determine call info & receiver // determine call info & receiver
@ -929,6 +925,13 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
} }
#endif #endif
// JSR 292
// If the resolved method is a MethodHandle invoke target the call
// site must be a MethodHandle call site.
if (callee_method->is_method_handle_invoke()) {
assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
}
// Compute entry points. This might require generation of C2I converter // Compute entry points. This might require generation of C2I converter
// frames, so we cannot be holding any locks here. Furthermore, the // frames, so we cannot be holding any locks here. Furthermore, the
// computation of the entry points is independent of patching the call. We // computation of the entry points is independent of patching the call. We
@ -940,13 +943,12 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
StaticCallInfo static_call_info; StaticCallInfo static_call_info;
CompiledICInfo virtual_call_info; CompiledICInfo virtual_call_info;
// Make sure the callee nmethod does not get deoptimized and removed before // Make sure the callee nmethod does not get deoptimized and removed before
// we are done patching the code. // we are done patching the code.
nmethod* nm = callee_method->code(); nmethod* callee_nm = callee_method->code();
nmethodLocker nl_callee(nm); nmethodLocker nl_callee(callee_nm);
#ifdef ASSERT #ifdef ASSERT
address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
#endif #endif
if (is_virtual) { if (is_virtual) {
@ -2077,7 +2079,6 @@ class AdapterHandlerTableIterator : public StackObj {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary // Implementation of AdapterHandlerLibrary
const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL; AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL; AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
const int AdapterHandlerLibrary_size = 16*K; const int AdapterHandlerLibrary_size = 16*K;
@ -2129,7 +2130,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
ResourceMark rm; ResourceMark rm;
NOT_PRODUCT(int code_size); NOT_PRODUCT(int code_size);
BufferBlob *B = NULL; AdapterBlob* B = NULL;
AdapterHandlerEntry* entry = NULL; AdapterHandlerEntry* entry = NULL;
AdapterFingerPrint* fingerprint = NULL; AdapterFingerPrint* fingerprint = NULL;
{ {
@ -2179,7 +2180,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
// Create I2C & C2I handlers // Create I2C & C2I handlers
BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
if (buf != NULL) { if (buf != NULL) {
CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size());
short buffer_locs[20]; short buffer_locs[20];
@ -2208,7 +2209,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
} }
#endif #endif
B = BufferBlob::create(AdapterHandlerEntry::name, &buffer); B = AdapterBlob::create(&buffer);
NOT_PRODUCT(code_size = buffer.code_size()); NOT_PRODUCT(code_size = buffer.code_size());
} }
if (B == NULL) { if (B == NULL) {
@ -2240,7 +2241,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
jio_snprintf(blob_id, jio_snprintf(blob_id,
sizeof(blob_id), sizeof(blob_id),
"%s(%s)@" PTR_FORMAT, "%s(%s)@" PTR_FORMAT,
AdapterHandlerEntry::name, B->name(),
fingerprint->as_string(), fingerprint->as_string(),
B->instructions_begin()); B->instructions_begin());
VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());

View file

@ -96,10 +96,9 @@ class SharedRuntime: AllStatic {
static jdouble dexp(jdouble x); static jdouble dexp(jdouble x);
static jdouble dpow(jdouble x, jdouble y); static jdouble dpow(jdouble x, jdouble y);
// exception handling across interpreter/compiler boundaries // exception handling across interpreter/compiler boundaries
static address raw_exception_handler_for_return_address(address return_address); static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address);
static address exception_handler_for_return_address(address return_address); static address exception_handler_for_return_address(JavaThread* thread, address return_address);
#ifndef SERIALGC #ifndef SERIALGC
// G1 write barriers // G1 write barriers
@ -568,9 +567,6 @@ class AdapterHandlerEntry : public BasicHashtableEntry {
AdapterHandlerEntry(); AdapterHandlerEntry();
public: public:
// The name we give all buffer blobs
static const char* name;
address get_i2c_entry() { return _i2c_entry; } address get_i2c_entry() { return _i2c_entry; }
address get_c2i_entry() { return _c2i_entry; } address get_c2i_entry() { return _c2i_entry; }
address get_c2i_unverified_entry() { return _c2i_unverified_entry; } address get_c2i_unverified_entry() { return _c2i_unverified_entry; }

View file

@ -223,7 +223,7 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
break; break;
case Deoptimization::Unpack_exception: case Deoptimization::Unpack_exception:
// exception is pending // exception is pending
pc = SharedRuntime::raw_exception_handler_for_return_address(pc); pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
// [phh] We're going to end up in some handler or other, so it doesn't // [phh] We're going to end up in some handler or other, so it doesn't
// matter what mdp we point to. See exception_handler_for_exception() // matter what mdp we point to. See exception_handler_for_exception()
// in interpreterRuntime.cpp. // in interpreterRuntime.cpp.

View file

@ -827,6 +827,8 @@ const int badCodeHeapFreeVal = 0xDD; // value used to zap
#define badHeapWord (::badHeapWordVal) #define badHeapWord (::badHeapWordVal)
#define badJNIHandle ((oop)::badJNIHandleVal) #define badJNIHandle ((oop)::badJNIHandleVal)
// Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
#define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Utility functions for bitfield manipulations // Utility functions for bitfield manipulations

View file

@ -31,10 +31,6 @@ uint ParallelTaskTerminator::_total_spins = 0;
uint ParallelTaskTerminator::_total_peeks = 0; uint ParallelTaskTerminator::_total_peeks = 0;
#endif #endif
bool TaskQueueSuper::peek() {
return _bottom != _age.top();
}
int TaskQueueSetSuper::randomParkAndMiller(int *seed0) { int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
const int a = 16807; const int a = 16807;
const int m = 2147483647; const int m = 2147483647;
@ -180,6 +176,13 @@ void ParallelTaskTerminator::reset_for_reuse() {
} }
} }
#ifdef ASSERT
bool ObjArrayTask::is_valid() const {
return _obj != NULL && _obj->is_objArray() && _index > 0 &&
_index < objArrayOop(_obj)->length();
}
#endif // ASSERT
bool RegionTaskQueueWithOverflow::is_empty() { bool RegionTaskQueueWithOverflow::is_empty() {
return (_region_queue.size() == 0) && return (_region_queue.size() == 0) &&
(_overflow_stack->length() == 0); (_overflow_stack->length() == 0);

View file

@ -22,6 +22,7 @@
* *
*/ */
template <unsigned int N>
class TaskQueueSuper: public CHeapObj { class TaskQueueSuper: public CHeapObj {
protected: protected:
// Internal type for indexing the queue; also used for the tag. // Internal type for indexing the queue; also used for the tag.
@ -30,10 +31,7 @@ protected:
// The first free element after the last one pushed (mod N). // The first free element after the last one pushed (mod N).
volatile uint _bottom; volatile uint _bottom;
enum { enum { MOD_N_MASK = N - 1 };
N = 1 << NOT_LP64(14) LP64_ONLY(17), // Queue size: 16K or 128K
MOD_N_MASK = N - 1 // To compute x mod N efficiently.
};
class Age { class Age {
public: public:
@ -84,12 +82,12 @@ protected:
// Returns a number in the range [0..N). If the result is "N-1", it should be // Returns a number in the range [0..N). If the result is "N-1", it should be
// interpreted as 0. // interpreted as 0.
uint dirty_size(uint bot, uint top) { uint dirty_size(uint bot, uint top) const {
return (bot - top) & MOD_N_MASK; return (bot - top) & MOD_N_MASK;
} }
// Returns the size corresponding to the given "bot" and "top". // Returns the size corresponding to the given "bot" and "top".
uint size(uint bot, uint top) { uint size(uint bot, uint top) const {
uint sz = dirty_size(bot, top); uint sz = dirty_size(bot, top);
// Has the queue "wrapped", so that bottom is less than top? There's a // Has the queue "wrapped", so that bottom is less than top? There's a
// complicated special case here. A pair of threads could perform pop_local // complicated special case here. A pair of threads could perform pop_local
@ -111,17 +109,17 @@ protected:
public: public:
TaskQueueSuper() : _bottom(0), _age() {} TaskQueueSuper() : _bottom(0), _age() {}
// Return "true" if the TaskQueue contains any tasks. // Return true if the TaskQueue contains any tasks.
bool peek(); bool peek() { return _bottom != _age.top(); }
// Return an estimate of the number of elements in the queue. // Return an estimate of the number of elements in the queue.
// The "careful" version admits the possibility of pop_local/pop_global // The "careful" version admits the possibility of pop_local/pop_global
// races. // races.
uint size() { uint size() const {
return size(_bottom, _age.top()); return size(_bottom, _age.top());
} }
uint dirty_size() { uint dirty_size() const {
return dirty_size(_bottom, _age.top()); return dirty_size(_bottom, _age.top());
} }
@ -132,19 +130,36 @@ public:
// Maximum number of elements allowed in the queue. This is two less // Maximum number of elements allowed in the queue. This is two less
// than the actual queue size, for somewhat complicated reasons. // than the actual queue size, for somewhat complicated reasons.
uint max_elems() { return N - 2; } uint max_elems() const { return N - 2; }
// Total size of queue. // Total size of queue.
static const uint total_size() { return N; } static const uint total_size() { return N; }
}; };
template<class E> class GenericTaskQueue: public TaskQueueSuper { template<class E, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N> {
protected:
typedef typename TaskQueueSuper<N>::Age Age;
typedef typename TaskQueueSuper<N>::idx_t idx_t;
using TaskQueueSuper<N>::_bottom;
using TaskQueueSuper<N>::_age;
using TaskQueueSuper<N>::increment_index;
using TaskQueueSuper<N>::decrement_index;
using TaskQueueSuper<N>::dirty_size;
public:
using TaskQueueSuper<N>::max_elems;
using TaskQueueSuper<N>::size;
private: private:
// Slow paths for push, pop_local. (pop_global has no fast path.) // Slow paths for push, pop_local. (pop_global has no fast path.)
bool push_slow(E t, uint dirty_n_elems); bool push_slow(E t, uint dirty_n_elems);
bool pop_local_slow(uint localBot, Age oldAge); bool pop_local_slow(uint localBot, Age oldAge);
public: public:
typedef E element_type;
// Initializes the queue to empty. // Initializes the queue to empty.
GenericTaskQueue(); GenericTaskQueue();
@ -175,19 +190,19 @@ private:
volatile E* _elems; volatile E* _elems;
}; };
template<class E> template<class E, unsigned int N>
GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() { GenericTaskQueue<E, N>::GenericTaskQueue() {
assert(sizeof(Age) == sizeof(size_t), "Depends on this."); assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
} }
template<class E> template<class E, unsigned int N>
void GenericTaskQueue<E>::initialize() { void GenericTaskQueue<E, N>::initialize() {
_elems = NEW_C_HEAP_ARRAY(E, N); _elems = NEW_C_HEAP_ARRAY(E, N);
guarantee(_elems != NULL, "Allocation failed."); guarantee(_elems != NULL, "Allocation failed.");
} }
template<class E> template<class E, unsigned int N>
void GenericTaskQueue<E>::oops_do(OopClosure* f) { void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
// tty->print_cr("START OopTaskQueue::oops_do"); // tty->print_cr("START OopTaskQueue::oops_do");
uint iters = size(); uint iters = size();
uint index = _bottom; uint index = _bottom;
@ -203,21 +218,21 @@ void GenericTaskQueue<E>::oops_do(OopClosure* f) {
// tty->print_cr("END OopTaskQueue::oops_do"); // tty->print_cr("END OopTaskQueue::oops_do");
} }
template<class E, unsigned int N>
template<class E> bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
if (dirty_n_elems == N - 1) { if (dirty_n_elems == N - 1) {
// Actually means 0, so do the push. // Actually means 0, so do the push.
uint localBot = _bottom; uint localBot = _bottom;
_elems[localBot] = t; // g++ complains if the volatile result of the assignment is unused.
const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot)); OrderAccess::release_store(&_bottom, increment_index(localBot));
return true; return true;
} }
return false; return false;
} }
template<class E> template<class E, unsigned int N>
bool GenericTaskQueue<E>:: bool GenericTaskQueue<E, N>::
pop_local_slow(uint localBot, Age oldAge) { pop_local_slow(uint localBot, Age oldAge) {
// This queue was observed to contain exactly one element; either this // This queue was observed to contain exactly one element; either this
// thread will claim it, or a competing "pop_global". In either case, // thread will claim it, or a competing "pop_global". In either case,
@ -249,8 +264,8 @@ pop_local_slow(uint localBot, Age oldAge) {
return false; return false;
} }
template<class E> template<class E, unsigned int N>
bool GenericTaskQueue<E>::pop_global(E& t) { bool GenericTaskQueue<E, N>::pop_global(E& t) {
Age oldAge = _age.get(); Age oldAge = _age.get();
uint localBot = _bottom; uint localBot = _bottom;
uint n_elems = size(localBot, oldAge.top()); uint n_elems = size(localBot, oldAge.top());
@ -258,7 +273,7 @@ bool GenericTaskQueue<E>::pop_global(E& t) {
return false; return false;
} }
t = _elems[oldAge.top()]; const_cast<E&>(t = _elems[oldAge.top()]);
Age newAge(oldAge); Age newAge(oldAge);
newAge.increment(); newAge.increment();
Age resAge = _age.cmpxchg(newAge, oldAge); Age resAge = _age.cmpxchg(newAge, oldAge);
@ -269,8 +284,8 @@ bool GenericTaskQueue<E>::pop_global(E& t) {
return resAge == oldAge; return resAge == oldAge;
} }
template<class E> template<class E, unsigned int N>
GenericTaskQueue<E>::~GenericTaskQueue() { GenericTaskQueue<E, N>::~GenericTaskQueue() {
FREE_C_HEAP_ARRAY(E, _elems); FREE_C_HEAP_ARRAY(E, _elems);
} }
@ -283,16 +298,18 @@ public:
virtual bool peek() = 0; virtual bool peek() = 0;
}; };
template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper { template<class T>
class GenericTaskQueueSet: public TaskQueueSetSuper {
private: private:
uint _n; uint _n;
GenericTaskQueue<E>** _queues; T** _queues;
public: public:
typedef typename T::element_type E;
GenericTaskQueueSet(int n) : _n(n) { GenericTaskQueueSet(int n) : _n(n) {
typedef GenericTaskQueue<E>* GenericTaskQueuePtr; typedef T* GenericTaskQueuePtr;
_queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n); _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
guarantee(_queues != NULL, "Allocation failure.");
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
_queues[i] = NULL; _queues[i] = NULL;
} }
@ -302,9 +319,9 @@ public:
bool steal_best_of_2(uint queue_num, int* seed, E& t); bool steal_best_of_2(uint queue_num, int* seed, E& t);
bool steal_best_of_all(uint queue_num, int* seed, E& t); bool steal_best_of_all(uint queue_num, int* seed, E& t);
void register_queue(uint i, GenericTaskQueue<E>* q); void register_queue(uint i, T* q);
GenericTaskQueue<E>* queue(uint n); T* queue(uint n);
// The thread with queue number "queue_num" (and whose random number seed // The thread with queue number "queue_num" (and whose random number seed
// is at "seed") is trying to steal a task from some other queue. (It // is at "seed") is trying to steal a task from some other queue. (It
@ -316,27 +333,27 @@ public:
bool peek(); bool peek();
}; };
template<class E> template<class T> void
void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) { GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
assert(i < _n, "index out of range."); assert(i < _n, "index out of range.");
_queues[i] = q; _queues[i] = q;
} }
template<class E> template<class T> T*
GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) { GenericTaskQueueSet<T>::queue(uint i) {
return _queues[i]; return _queues[i];
} }
template<class E> template<class T> bool
bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) { GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
for (uint i = 0; i < 2 * _n; i++) for (uint i = 0; i < 2 * _n; i++)
if (steal_best_of_2(queue_num, seed, t)) if (steal_best_of_2(queue_num, seed, t))
return true; return true;
return false; return false;
} }
template<class E> template<class T> bool
bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) { GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
if (_n > 2) { if (_n > 2) {
int best_k; int best_k;
uint best_sz = 0; uint best_sz = 0;
@ -359,8 +376,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t)
} }
} }
template<class E> template<class T> bool
bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) { GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
if (_n > 2) { if (_n > 2) {
uint k = queue_num; uint k = queue_num;
while (k == queue_num) k = randomParkAndMiller(seed) % _n; while (k == queue_num) k = randomParkAndMiller(seed) % _n;
@ -375,8 +392,8 @@ bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
} }
} }
template<class E> template<class T> bool
bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) { GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
if (_n > 2) { if (_n > 2) {
uint k1 = queue_num; uint k1 = queue_num;
while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n; while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
@ -397,8 +414,8 @@ bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
} }
} }
template<class E> template<class T>
bool GenericTaskQueueSet<E>::peek() { bool GenericTaskQueueSet<T>::peek() {
// Try all the queues. // Try all the queues.
for (uint j = 0; j < _n; j++) { for (uint j = 0; j < _n; j++) {
if (_queues[j]->peek()) if (_queues[j]->peek())
@ -468,14 +485,16 @@ public:
#endif #endif
}; };
template<class E> inline bool GenericTaskQueue<E>::push(E t) { template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::push(E t) {
uint localBot = _bottom; uint localBot = _bottom;
assert((localBot >= 0) && (localBot < N), "_bottom out of range."); assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
idx_t top = _age.top(); idx_t top = _age.top();
uint dirty_n_elems = dirty_size(localBot, top); uint dirty_n_elems = dirty_size(localBot, top);
assert((dirty_n_elems >= 0) && (dirty_n_elems < N), "n_elems out of range."); assert(dirty_n_elems < N, "n_elems out of range.");
if (dirty_n_elems < max_elems()) { if (dirty_n_elems < max_elems()) {
_elems[localBot] = t; // g++ complains if the volatile result of the assignment is unused.
const_cast<E&>(_elems[localBot] = t);
OrderAccess::release_store(&_bottom, increment_index(localBot)); OrderAccess::release_store(&_bottom, increment_index(localBot));
return true; return true;
} else { } else {
@ -483,7 +502,8 @@ template<class E> inline bool GenericTaskQueue<E>::push(E t) {
} }
} }
template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) { template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::pop_local(E& t) {
uint localBot = _bottom; uint localBot = _bottom;
// This value cannot be N-1. That can only occur as a result of // This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method // the assignment to bottom in this method. If it does, this method
@ -497,7 +517,7 @@ template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
// This is necessary to prevent any read below from being reordered // This is necessary to prevent any read below from being reordered
// before the store just above. // before the store just above.
OrderAccess::fence(); OrderAccess::fence();
t = _elems[localBot]; const_cast<E&>(t = _elems[localBot]);
// This is a second read of "age"; the "size()" above is the first. // This is a second read of "age"; the "size()" above is the first.
// If there's still at least one element in the queue, based on the // If there's still at least one element in the queue, based on the
// "_bottom" and "age" we've read, then there can be no interference with // "_bottom" and "age" we've read, then there can be no interference with
@ -514,17 +534,23 @@ template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
} }
typedef oop Task; typedef oop Task;
typedef GenericTaskQueue<Task> OopTaskQueue; typedef GenericTaskQueue<Task> OopTaskQueue;
typedef GenericTaskQueueSet<Task> OopTaskQueueSet; typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
#ifdef _MSC_VER
#define COMPRESSED_OOP_MASK 1 #pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif
// This is a container class for either an oop* or a narrowOop*. // This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow() // Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed. // to determine which should be processed.
class StarTask { class StarTask {
void* _holder; // either union oop* or narrowOop* void* _holder; // either union oop* or narrowOop*
enum { COMPRESSED_OOP_MASK = 1 };
public: public:
StarTask(narrowOop* p) { StarTask(narrowOop* p) {
assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!"); assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
@ -540,20 +566,61 @@ class StarTask {
return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK); return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
} }
// Operators to preserve const/volatile in assignments required by gcc StarTask& operator=(const StarTask& t) {
void operator=(const volatile StarTask& t) volatile { _holder = t._holder; } _holder = t._holder;
return *this;
}
volatile StarTask& operator=(const volatile StarTask& t) volatile {
_holder = t._holder;
return *this;
}
bool is_narrow() const { bool is_narrow() const {
return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0); return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
} }
}; };
typedef GenericTaskQueue<StarTask> OopStarTaskQueue; class ObjArrayTask
typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet; {
public:
ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
assert(idx <= size_t(max_jint), "too big");
}
ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }
ObjArrayTask& operator =(const ObjArrayTask& t) {
_obj = t._obj;
_index = t._index;
return *this;
}
volatile ObjArrayTask&
operator =(const volatile ObjArrayTask& t) volatile {
_obj = t._obj;
_index = t._index;
return *this;
}
inline oop obj() const { return _obj; }
inline int index() const { return _index; }
DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.
private:
oop _obj;
int _index;
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
typedef size_t RegionTask; // index for region typedef size_t RegionTask; // index for region
typedef GenericTaskQueue<RegionTask> RegionTaskQueue; typedef GenericTaskQueue<RegionTask> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet; typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
class RegionTaskQueueWithOverflow: public CHeapObj { class RegionTaskQueueWithOverflow: public CHeapObj {
protected: protected:

View file

@ -0,0 +1,76 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6930043
* @summary C2: SIGSEGV in javasoft.sqe.tests.lang.arr017.arr01702.arr01702.loop_forw(II)I
*
* @run main Test6930043
*/
import java.io.PrintStream;
public class Test6930043 {
    // Regression test for bug 6930043 (see @summary above): C2 crashed with
    // SIGSEGV compiling a loop shaped like loop_forw.  The code shape below
    // is deliberate — do not restructure it, or the test may stop
    // reproducing the original compilation pattern.
    int[] a;    // array summed by both loops; a null value exercises the early-out path
    int idx;    // field written from inside loop_back's body on every iteration
    // Sums a[i_0_ .. i] walking backwards, storing each index into the
    // field 'idx' as a side effect.  Returns 0 when 'a' is null.
    public int loop_back(int i, int i_0_) {
        int i_1_ = 0;
        int[] is = a;
        if (is == null) return 0;
        for (int i_2_ = i; i_2_ >= i_0_; i_2_--)
            i_1_ += is[idx = i_2_];
        return i_1_;
    }
    // Sums a[start .. end-1] walking forwards.  This is the method named in
    // the bug summary as the one whose compiled code crashed.
    // Returns 0 when 'a' is null.
    public int loop_forw(int start, int end) {
        int result = 0;
        int[] is = a;
        if (is == null) return 0;
        for (int index = start; index < end; index++)
            result += is[index];
        // result += is[idx = index];
        return result;
    }
    public static void main(String[] strings) {
        Test6930043 var_Test6930043 = new Test6930043();
        var_Test6930043.a = new int[1000000];
        // Warm up loop_forw with in-bounds arguments so it gets compiled,
        // then call it with an out-of-bounds start; the resulting
        // ArrayIndexOutOfBoundsException is expected and swallowed.
        // (The array is zero-filled, so any normal return must be 0.)
        var_Test6930043.loop_forw(10, 999990);
        var_Test6930043.loop_forw(10, 999990);
        for (int i = 0; i < 3; i++) {
            try {
                if (var_Test6930043.loop_forw(-1, 999990) != 0) throw new InternalError();
            } catch (ArrayIndexOutOfBoundsException e) { }
        }
        // Same warm-up-then-out-of-bounds sequence for the backward loop.
        var_Test6930043.loop_back(999990, 10);
        var_Test6930043.loop_back(999990, 10);
        for (int i = 0; i < 3; i++) {
            try {
                if (var_Test6930043.loop_back(999990, -1) != 0) throw new InternalError();
            } catch (ArrayIndexOutOfBoundsException e) { }
        }
    }
}

View file

@ -0,0 +1,51 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6932496
* @summary incorrect deopt of jsr subroutine on 64 bit c1
*
* @compile -source 1.5 -target 1.5 -XDjsrlimit=0 Test6932496.java
* @run main/othervm -Xcomp -XX:CompileOnly=Test6932496.m Test6932496
*/
public class Test6932496 {
    // Regression test for bug 6932496 (see @summary above): incorrect
    // deoptimization of a jsr subroutine on 64-bit C1.  The @compile line
    // uses -XDjsrlimit=0 so javac emits the finally block as a jsr
    // subroutine, and the @run line forces compilation of m() with
    // -Xcomp -XX:CompileOnly=Test6932496.m.  Keep the code shape as-is.
    static class A {
        volatile boolean flag = false;    // volatile so the store in m() is a volatile write
    }
    static void m() {
        // Intentionally empty try: the finally body (allocation plus a
        // volatile store) becomes the entire jsr subroutine.
        try {
        } finally {
            A a = new A();
            a.flag = true;
        }
    }
    static public void main(String[] args) {
        m();    // test passes if this completes without crashing / mis-deopting
    }
}

View file

@ -0,0 +1,48 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
/**
* @test
* @bug 6935535
* @summary String.indexOf() returns incorrect result on x86 with SSE4.2
*
* @run main/othervm -Xcomp Test
*/
public class Test {
    // Regression test for bug 6935535 (see @summary above): String.indexOf()
    // returned an incorrect result on x86 with SSE4.2.  The exact needle and
    // haystack literals are what trigger the intrinsic's bug — do not alter them.
    static int IndexOfTest(String str) {
        // 17-char needle; with the 31-char haystack built in main() the
        // correct answer is -1 (the trailing 'x' needed for a match is cut off).
        return str.indexOf("1111111111111xx1x");
    }
    public static void main(String args[]) {
        String str = "1111111111111xx1111111111111xx1x";
        // Truncate to 31 chars, dropping the final 'x' so the needle no
        // longer occurs anywhere in the string.
        str = str.substring(0, 31);
        int idx = IndexOfTest(str);
        System.out.println("IndexOf(" + "1111111111111xx1x" + ") = " + idx + " in " + str);
        if (idx != -1) {
            // A (spurious) match means the bug reproduced; exit 97 fails the test.
            System.exit(97);
        }
    }
}

View file

@ -59,3 +59,4 @@ b1005c504358c18694c84e95fec16b28cdce7ae1 jdk7-b79
c876ad22e4bf9d3c6460080db7ace478e29a3ff9 jdk7-b82 c876ad22e4bf9d3c6460080db7ace478e29a3ff9 jdk7-b82
309a0a7fc6ceb1c9fc3a85b3608e97ef8f7b0dfd jdk7-b83 309a0a7fc6ceb1c9fc3a85b3608e97ef8f7b0dfd jdk7-b83
32c0cf01d555747918529a6ff9e06b0090c7a474 jdk7-b84 32c0cf01d555747918529a6ff9e06b0090c7a474 jdk7-b84
6c0ccabb430dacdcd4479f8b197980d5da4eeb66 jdk7-b85

View file

@ -59,3 +59,4 @@ f051045fe94a48fae1097f90cbd9227e6aae6b7e jdk7-b81
31573ae8eed15a6c170f3f0d1abd0b9109c0e086 jdk7-b82 31573ae8eed15a6c170f3f0d1abd0b9109c0e086 jdk7-b82
371e3ded591d09112a9f231e37cb072781c486ac jdk7-b83 371e3ded591d09112a9f231e37cb072781c486ac jdk7-b83
8bc02839eee4ef02cd1b50e87638874368a26535 jdk7-b84 8bc02839eee4ef02cd1b50e87638874368a26535 jdk7-b84
8424512588ff95362c1f1e5f11c6efd4e7f7db6e jdk7-b85

View file

@ -59,3 +59,4 @@ e6a5d095c356a547cf5b3c8885885aca5e91e09b jdk7-b77
69ef657320ad5c35cfa12e4d8322d877e778f8b3 jdk7-b82 69ef657320ad5c35cfa12e4d8322d877e778f8b3 jdk7-b82
9027c6b9d7e2c9ca04a1add691b5b50d0f22b1aa jdk7-b83 9027c6b9d7e2c9ca04a1add691b5b50d0f22b1aa jdk7-b83
7cb9388bb1a16365fa5118c5efa38b1cd58be40d jdk7-b84 7cb9388bb1a16365fa5118c5efa38b1cd58be40d jdk7-b84
b396584a3e64988839cca21ea1f7fbdcc9248783 jdk7-b85

Some files were not shown because too many files have changed in this diff Show more