Lana Steuck 2010-04-15 11:51:48 -07:00
commit 951fbb9129
347 changed files with 50737 additions and 22562 deletions

View file

@ -60,3 +60,6 @@ a30062be6d9ca1d48579826f870f85974300004e jdk7-b82
34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83 34c8199936a1682aa8587857f44cfaf37c2b6381 jdk7-b83
b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84 b1e55627a6980b9508854ed0c0f21d4f981b4494 jdk7-b84
b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85 b6f633a93ae0ec4555ff4bf756f5e2150c9bdede jdk7-b85
c94d9cc81f495d97817eba9d71b84fc45f7661a5 jdk7-b86
b7456c473862048fa70ed8092313a4ef0a55d403 jdk7-b87
7077b95d42f6b3942a8751bba033801ff50e5889 jdk7-b88

View file

@ -60,3 +60,6 @@ e1176f86805fe07fd9fb9da065dc51b47712ce76 jdk7-b82
6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83 6880a3af9addb41541e80ebe8cde6f79ec402a58 jdk7-b83
2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84 2f3ea057d1ad56cf3b269cdc4de2741411151982 jdk7-b84
cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85 cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
433a60a9c0bf1b26ee7e65cebaa89c541f497aed jdk7-b86
6b1069f53fbc30663ccef49d78c31bb7d6967bde jdk7-b87
82135c848d5fcddb065e98ae77b81077c858f593 jdk7-b88

View file

@ -60,3 +60,6 @@ e08a42a2a94d97ea8eedb187a94dbff822c8fbba jdk7-b81
fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83 fde0df7a2384f7fe33204a79678989807d9c2b98 jdk7-b83
68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84 68c8961a82e4a3ad2a67991e5d834192a81eb4cd jdk7-b84
c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85 c67a9df7bc0ca291f08f9a9cc05cb78ea15d25e6 jdk7-b85
6253e28826d16cf1aecc39ce04c8de1f6bf2df5f jdk7-b86
09a41111a401d327f65e453384d976a10154d9ea jdk7-b87
39e14d2da687c7e592142137517aaf689544820f jdk7-b88

View file

@ -83,3 +83,7 @@ fafab5d5349c7c066d677538db67a1ee0fb33bd2 hs15-b05
ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84 ffc8d176b84bcfb5ac21302b4feb3b0c0d69b97c jdk7-b84
6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85 6c9796468b91dcbb39e09dfa1baf9779ac45eb66 jdk7-b85
418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10 418bc80ce13995149eadc9eecbba21d7a9fa02ae hs17-b10
bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88

View file

@ -174,7 +174,7 @@ lib_info* add_lib_info_fd(struct ps_prochandle* ph, const char* libname, int fd,
return NULL; return NULL;
} }
newlib->symtab = build_symtab(newlib->fd); newlib->symtab = build_symtab(newlib->fd, libname);
if (newlib->symtab == NULL) { if (newlib->symtab == NULL) {
print_debug("symbol table build failed for %s\n", newlib->name); print_debug("symbol table build failed for %s\n", newlib->name);
} }

View file

@ -53,8 +53,274 @@ typedef struct symtab {
struct hsearch_data *hash_table; struct hsearch_data *hash_table;
} symtab_t; } symtab_t;
// read symbol table from given fd.
struct symtab* build_symtab(int fd) {
// Directory that contains global debuginfo files. In theory it
// should be possible to change this, but in a Java environment there
// is no obvious place to put a user interface to do it. Maybe this
// could be set with an environment variable.
static const char debug_file_directory[] = "/usr/lib/debug";
/* The CRC used in gnu_debuglink, retrieved from
http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files. */
unsigned int gnu_debuglink_crc32 (unsigned int crc,
unsigned char *buf, size_t len)
{
static const unsigned int crc32_table[256] =
{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
0x2d02ef8d
};
unsigned char *end;
crc = ~crc & 0xffffffff;
for (end = buf + len; buf < end; ++buf)
crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
return ~crc & 0xffffffff;
}
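
This table is the standard CRC-32 one (polynomial 0xEDB88320, the same algorithm GDB documents for .gnu_debuglink), so it can be sanity-checked against the classic test vector. A minimal check, assuming the function above is linked in:

#include <assert.h>
#include <string.h>

static void crc_self_check(void) {
  unsigned char v[] = "123456789";
  /* 0xcbf43926 is the well-known CRC-32 check value for "123456789" */
  assert(gnu_debuglink_crc32(0, v, strlen((char *)v)) == 0xcbf43926);
}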
/* Open a debuginfo file and check its CRC. If it exists and the CRC
matches return its fd. */
static int
open_debug_file (const char *pathname, unsigned int crc)
{
unsigned int file_crc = 0;
unsigned char buffer[8 * 1024];
int fd = pathmap_open(pathname);
if (fd < 0)
return -1;
lseek(fd, 0, SEEK_SET);
for (;;) {
int len = read(fd, buffer, sizeof buffer);
if (len <= 0)
break;
file_crc = gnu_debuglink_crc32(file_crc, buffer, len);
}
if (crc == file_crc)
return fd;
else {
close(fd);
return -1;
}
}
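
A hedged usage sketch (path and CRC value are hypothetical, not from this commit): the helper only returns the descriptor when the candidate file's whole-file CRC matches the value recorded alongside the .gnu_debuglink name, so stale debuginfo is rejected.

/* hypothetical caller */
unsigned int expected_crc = 0x12345678;  /* taken from .gnu_debuglink */
int dfd = open_debug_file("/usr/lib/debug/lib/libfoo.so.debug", expected_crc);
if (dfd >= 0) {
  /* CRC matched; dfd can be handed to the symtab builder */
}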
/* Find an ELF section. */
static struct elf_section *find_section_by_name(char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
ELF_SHDR* cursct = NULL;
char *strtab;
int cnt;
if (scn_cache[ehdr->e_shstrndx].c_data == NULL) {
if ((scn_cache[ehdr->e_shstrndx].c_data
= read_section_data(fd, ehdr, cursct)) == NULL) {
return NULL;
}
}
strtab = scn_cache[ehdr->e_shstrndx].c_data;
for (cursct = shbuf, cnt = 0;
cnt < ehdr->e_shnum;
cnt++, cursct++) {
if (strcmp(cursct->sh_name + strtab, name) == 0) {
scn_cache[cnt].c_data = read_section_data(fd, ehdr, cursct);
return &scn_cache[cnt];
}
}
return NULL;
}
/* Look for a ".gnu_debuglink" section. If one exists, try to open a
suitable debuginfo file. */
static int open_file_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
int debug_fd;
struct elf_section *debug_link = find_section_by_name(".gnu_debuglink", fd, ehdr,
shbuf, scn_cache);
if (debug_link == NULL)
return -1;
char *debug_filename = debug_link->c_data;
int offset = (strlen(debug_filename) + 4) >> 2;
static unsigned int crc;
crc = ((unsigned int*)debug_link->c_data)[offset];
char *debug_pathname = malloc(strlen(debug_filename)
+ strlen(name)
+ strlen(".debug/")
+ strlen(debug_file_directory)
+ 2);
strcpy(debug_pathname, name);
char *last_slash = strrchr(debug_pathname, '/');
if (last_slash == NULL)
return -1;
/* Look in the same directory as the object. */
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
/* Look in a subdirectory named ".debug". */
strcpy(last_slash+1, ".debug/");
strcat(last_slash, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
/* Look in /usr/lib/debug + the full pathname. */
strcpy(debug_pathname, debug_file_directory);
strcat(debug_pathname, name);
last_slash = strrchr(debug_pathname, '/');
strcpy(last_slash+1, debug_filename);
debug_fd = open_debug_file(debug_pathname, crc);
if (debug_fd >= 0) {
free(debug_pathname);
return debug_fd;
}
free(debug_pathname);
return -1;
}
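
The section payload parsed above is laid out as: the debug file name, NUL-terminated, padded with zeros to a 4-byte boundary, then the 32-bit CRC of the debug file; "(strlen(debug_filename) + 4) >> 2" is the word index of that CRC. A standalone illustration (buffer contents invented):

#include <string.h>

static unsigned int debuglink_crc_demo(void) {
  unsigned char payload[] = {
    'f','o','o','.','d','e','b','u','g', 0, 0, 0,  /* name + NUL + pad   */
    0x78, 0x56, 0x34, 0x12                         /* CRC, little-endian */
  };
  size_t idx = (strlen((const char *)payload) + 4) >> 2;  /* (9+4)>>2 == 3 */
  unsigned int crc;
  memcpy(&crc, payload + 4 * idx, sizeof crc);  /* byte offset 12 */
  return crc;  /* 0x12345678 on a little-endian machine */
}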
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo);
/* Look for a ".gnu_debuglink" section. If one exists, try to open a
suitable debuginfo file and read a symbol table from it. */
static struct symtab *build_symtab_from_debug_link(const char *name,
int fd,
ELF_EHDR *ehdr,
ELF_SHDR *shbuf,
struct elf_section *scn_cache)
{
fd = open_file_from_debug_link(name, fd, ehdr, shbuf, scn_cache);
if (fd >= 0) {
struct symtab *symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
close(fd);
return symtab;
}
return NULL;
}
// Given a build_id, find the associated debuginfo file
static char *
build_id_to_debug_filename (size_t size, unsigned char *data)
{
char *filename, *s;
filename = malloc(strlen (debug_file_directory) + (sizeof "/.build-id/" - 1) + 1
+ 2 * size + (sizeof ".debug" - 1) + 1);
s = filename + sprintf (filename, "%s/.build-id/", debug_file_directory);
if (size > 0)
{
size--;
s += sprintf (s, "%02x", *data++);
}
if (size > 0)
*s++ = '/';
while (size-- > 0)
s += sprintf (s, "%02x", *data++);
strcpy (s, ".debug");
return filename;
}
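
A worked example of the path this produces: the first build-ID byte becomes a subdirectory, the remaining bytes the file name.

unsigned char id[] = { 0xab, 0xcd, 0xef, 0x01 };
char *path = build_id_to_debug_filename(sizeof id, id);
/* path == "/usr/lib/debug/.build-id/ab/cdef01.debug" */
free(path);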
// Read a build ID note. Try to open any associated debuginfo file
// and return its symtab
static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)
{
int fd;
struct symtab *symtab = NULL;
unsigned char *bytes
= (unsigned char*)(note+1) + note->n_namesz;
unsigned char *filename
= (build_id_to_debug_filename (note->n_descsz, bytes));
fd = pathmap_open(filename);
if (fd >= 0) {
symtab = build_symtab_internal(fd, NULL, /* try_debuginfo */ false);
close(fd);
}
free(filename);
return symtab;
}
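
For reference, the note layout this walks (assuming a well-formed NT_GNU_BUILD_ID note; its n_namesz is exactly 4, "GNU\0", so the plain "(note+1) + n_namesz" arithmetic lands on the descriptor without extra alignment rounding):

/* illustration only, not a HotSpot type */
typedef struct {
  Elf64_Nhdr    hdr;     /* n_namesz = 4, n_descsz = 20, n_type = 3 */
  char          name[4]; /* "GNU\0"                                 */
  unsigned char id[20];  /* the build ID bytes (SHA-1 by default)   */
} gnu_build_id_note;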
// read symbol table from given fd. If try_debuginfo is true, also
// try to open an associated debuginfo file
static struct symtab* build_symtab_internal(int fd, const char *filename, bool try_debuginfo) {
ELF_EHDR ehdr; ELF_EHDR ehdr;
char *names = NULL; char *names = NULL;
struct symtab* symtab = NULL; struct symtab* symtab = NULL;
@ -66,6 +332,7 @@ struct symtab* build_symtab(int fd) {
ELF_SHDR* cursct = NULL; ELF_SHDR* cursct = NULL;
ELF_PHDR* phbuf = NULL; ELF_PHDR* phbuf = NULL;
ELF_PHDR* phdr = NULL; ELF_PHDR* phdr = NULL;
int sym_section = SHT_DYNSYM;
uintptr_t baseaddr = (uintptr_t)-1; uintptr_t baseaddr = (uintptr_t)-1;
@ -90,18 +357,23 @@ struct symtab* build_symtab(int fd) {
for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) { for (cursct = shbuf, cnt = 0; cnt < ehdr.e_shnum; cnt++) {
scn_cache[cnt].c_shdr = cursct; scn_cache[cnt].c_shdr = cursct;
if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB) { if (cursct->sh_type == SHT_SYMTAB || cursct->sh_type == SHT_STRTAB
|| cursct->sh_type == SHT_NOTE || cursct->sh_type == SHT_DYNSYM) {
if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) { if ( (scn_cache[cnt].c_data = read_section_data(fd, &ehdr, cursct)) == NULL) {
goto quit; goto quit;
} }
} }
if (cursct->sh_type == SHT_SYMTAB) {
// Full symbol table available so use that
sym_section = cursct->sh_type;
}
cursct++; cursct++;
} }
for (cnt = 1; cnt < ehdr.e_shnum; cnt++) { for (cnt = 1; cnt < ehdr.e_shnum; cnt++) {
ELF_SHDR *shdr = scn_cache[cnt].c_shdr; ELF_SHDR *shdr = scn_cache[cnt].c_shdr;
if (shdr->sh_type == SHT_SYMTAB) { if (shdr->sh_type == sym_section) {
ELF_SYM *syms; ELF_SYM *syms;
int j, n, rslt; int j, n, rslt;
size_t size; size_t size;
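
Background for the new sym_section logic: every dynamic object has a .dynsym (exported symbols only), while .symtab is present only in unstripped objects and debuginfo files but then also covers local and static symbols, so it wins whenever both exist. Restated as a sketch:

#include <elf.h>

/* hypothetical helper mirroring the loop's decision */
static int preferred_symtab_type(int saw_full_symtab) {
  return saw_full_symtab ? SHT_SYMTAB   /* locals + globals           */
                         : SHT_DYNSYM;  /* dynamic (exported) symbols */
}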
@ -163,6 +435,45 @@ struct symtab* build_symtab(int fd) {
} }
} }
// Look for a separate debuginfo file.
if (try_debuginfo) {
// We prefer a debug symtab to an object's own symtab, so look in
// the debuginfo file. We stash a copy of the old symtab in case
// there is no debuginfo.
struct symtab* prev_symtab = symtab;
symtab = NULL;
#ifdef NT_GNU_BUILD_ID
// First we look for a Build ID
for (cursct = shbuf, cnt = 0;
symtab == NULL && cnt < ehdr.e_shnum;
cnt++) {
if (cursct->sh_type == SHT_NOTE) {
Elf64_Nhdr *note = (Elf64_Nhdr *)scn_cache[cnt].c_data;
if (note->n_type == NT_GNU_BUILD_ID) {
symtab = build_symtab_from_build_id(note);
}
}
cursct++;
}
#endif
// Then, if that doesn't work, the debug link
if (symtab == NULL) {
symtab = build_symtab_from_debug_link(filename, fd, &ehdr, shbuf,
scn_cache);
}
// If we still haven't found a symtab, use the object's own symtab.
if (symtab != NULL) {
if (prev_symtab != NULL)
destroy_symtab(prev_symtab);
} else {
symtab = prev_symtab;
}
}
quit: quit:
if (shbuf) free(shbuf); if (shbuf) free(shbuf);
if (phbuf) free(phbuf); if (phbuf) free(phbuf);
@ -177,6 +488,11 @@ quit:
return symtab; return symtab;
} }
struct symtab* build_symtab(int fd, const char *filename) {
return build_symtab_internal(fd, filename, /* try_debuginfo */ true);
}
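
Taken together, the symbol lookup order is now: (1) a debuginfo file named by the NT_GNU_BUILD_ID note, (2) a file found via .gnu_debuglink (same directory, then ./.debug/, then under /usr/lib/debug), (3) the object's own .symtab or .dynsym. Callers see only the two-argument entry point; a sketch of a typical call site (the path is illustrative):

struct symtab *st = build_symtab(fd, "/usr/java/lib/libjvm.so");
if (st == NULL)
  print_debug("symbol table build failed for libjvm.so\n");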
void destroy_symtab(struct symtab* symtab) { void destroy_symtab(struct symtab* symtab) {
if (!symtab) return; if (!symtab) return;
if (symtab->strs) free(symtab->strs); if (symtab->strs) free(symtab->strs);

View file

@ -32,7 +32,7 @@
struct symtab; struct symtab;
// build symbol table for a given ELF file descriptor // build symbol table for a given ELF file descriptor
struct symtab* build_symtab(int fd); struct symtab* build_symtab(int fd, const char *filename);
// destroy the symbol table // destroy the symbol table
void destroy_symtab(struct symtab* symtab); void destroy_symtab(struct symtab* symtab);

View file

@ -31,11 +31,11 @@
# #
# Don't put quotes (fail windows build). # Don't put quotes (fail windows build).
HOTSPOT_VM_COPYRIGHT=Copyright 2009 HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=17 HS_MAJOR_VER=18
HS_MINOR_VER=0 HS_MINOR_VER=0
HS_BUILD_NUMBER=10 HS_BUILD_NUMBER=02
JDK_MAJOR_VER=1 JDK_MAJOR_VER=1
JDK_MINOR_VER=7 JDK_MINOR_VER=7

View file

@ -0,0 +1,5 @@
#!/bin/sh
nm --defined-only $* | awk '
{ if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" }
'
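
This new script feeds the mapfile rule below: it emits one mapfile entry per defined C++ vtable symbol (_ZTV...) or gHotSpotVM... variable found in the object files. Illustrative output (symbol names are examples, not taken from this commit):

	_ZTV13CollectedHeap;
	gHotSpotVMStructs;
	gHotSpotVMTypes;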

View file

@ -290,6 +290,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support. # This is for Forte Analyzer profiling support.
AsyncGetCallTrace; AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local: local:
*; *;
}; };

View file

@ -285,6 +285,9 @@ SUNWprivate_1.1 {
# This is for Forte Analyzer profiling support. # This is for Forte Analyzer profiling support.
AsyncGetCallTrace; AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local: local:
*; *;
}; };

View file

@ -121,14 +121,21 @@ JVM_OBJ_FILES = $(Obj_Files)
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES)) vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
mapfile : $(MAPFILE) mapfile : $(MAPFILE) vm.def
rm -f $@ rm -f $@
cat $^ > $@ awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
{ system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
mapfile_reorder : mapfile $(REORDERFILE) mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@ rm -f $@
cat $^ > $@ cat $^ > $@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
ifeq ($(ZERO_LIBARCH), ppc64) ifeq ($(ZERO_LIBARCH), ppc64)
STATIC_CXX = false STATIC_CXX = false
else else

View file

@ -28,6 +28,9 @@ REM
REM Since we don't have uname and we could be cross-compiling, REM Since we don't have uname and we could be cross-compiling,
REM Use the compiler to determine which ARCH we are building REM Use the compiler to determine which ARCH we are building
REM REM
REM Note: Running this batch file from the Windows command shell requires
REM that "grep" be accessible on the PATH. An MKS install does this.
REM
cl 2>&1 | grep "IA-64" >NUL cl 2>&1 | grep "IA-64" >NUL
if %errorlevel% == 0 goto isia64 if %errorlevel% == 0 goto isia64
cl 2>&1 | grep "AMD64" >NUL cl 2>&1 | grep "AMD64" >NUL
@ -57,11 +60,12 @@ if not "%7" == "" goto usage
if "%1" == "product" goto test1 if "%1" == "product" goto test1
if "%1" == "debug" goto test1 if "%1" == "debug" goto test1
if "%1" == "fastdebug" goto test1 if "%1" == "fastdebug" goto test1
if "%1" == "tree" goto test1
goto usage goto usage
:test1 :test1
if "%2" == "core" goto test2 if "%2" == "core" goto test2
if "%2" == "kernel" goto test2 if "%2" == "kernel" goto test2
if "%2" == "compiler1" goto test2 if "%2" == "compiler1" goto test2
if "%2" == "compiler2" goto test2 if "%2" == "compiler2" goto test2
if "%2" == "tiered" goto test2 if "%2" == "tiered" goto test2
@ -70,6 +74,7 @@ if "%2" == "adlc" goto build_adlc
goto usage goto usage
:test2 :test2
if "%1" == "tree" goto build_tree
REM check_j2se_version REM check_j2se_version
REM jvmti.make requires J2SE 1.4.x or newer. REM jvmti.make requires J2SE 1.4.x or newer.
REM If not found then fail fast. REM If not found then fail fast.
@ -93,6 +98,10 @@ goto end
nmake -f %3/make/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1 nmake -f %3/make/windows/build.make Variant=compiler2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION=%5 ADLC_ONLY=1 %1
goto end goto end
:build_tree
nmake -f %3/make/windows/build.make Variant=%2 WorkSpace=%3 BootStrapDir=%4 BuildUser="%USERNAME%" HOTSPOT_BUILD_VERSION="%5" %1
goto end
:usage :usage
echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home] echo Usage: build flavor version workspace bootstrap_dir [build_id] [windbg_home]
echo. echo.
@ -100,8 +109,10 @@ echo where:
echo flavor is "product", "debug" or "fastdebug", echo flavor is "product", "debug" or "fastdebug",
echo version is "core", "kernel", "compiler1", "compiler2", or "tiered", echo version is "core", "kernel", "compiler1", "compiler2", or "tiered",
echo workspace is source directory without trailing slash, echo workspace is source directory without trailing slash,
echo bootstrap_dir is a full path to echo a JDK in which bin/java echo bootstrap_dir is a full path to a JDK in which bin/java
echo and bin/javac are present and working, and echo build_id is an echo and bin/javac are present and working, and build_id is an
echo optional build identifier displayed by java -version echo optional build identifier displayed by java -version
exit /b 1
:end :end
exit /b %errorlevel%

View file

@ -27,6 +27,9 @@
# environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION) # environment variables (Variant, WorkSpace, BootStrapDir, BuildUser, HOTSPOT_BUILD_VERSION)
# are passed in as command line arguments. # are passed in as command line arguments.
# Note: Running nmake or build.bat from the Windows command shell requires
# that "sh" be accessible on the PATH. An MKS install does this.
# SA components are built if BUILD_WIN_SA=1 is specified. # SA components are built if BUILD_WIN_SA=1 is specified.
# See notes in README. This produces files: # See notes in README. This produces files:
# 1. sa-jdi.jar - This is built before building jvm.dll # 1. sa-jdi.jar - This is built before building jvm.dll
@ -233,6 +236,12 @@ develop: checks $(variantDir) $(variantDir)\local.make sanity
cd $(variantDir) cd $(variantDir)
nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH)
# target to create just the directory structure
tree: checks $(variantDir) $(variantDir)\local.make sanity
mkdir $(variantDir)\product
mkdir $(variantDir)\debug
mkdir $(variantDir)\fastdebug
sanity: sanity:
@ echo; @ echo;
@ cd $(variantDir) @ cd $(variantDir)

View file

@ -36,6 +36,9 @@ REM
REM Since we don't have uname and we could be cross-compiling, REM Since we don't have uname and we could be cross-compiling,
REM Use the compiler to determine which ARCH we are building REM Use the compiler to determine which ARCH we are building
REM REM
REM Note: Running this batch file from the Windows command shell requires
REM that "grep" be accessible on the PATH. An MKS install does this.
REM
cl 2>&1 | grep "IA-64" >NUL cl 2>&1 | grep "IA-64" >NUL
if %errorlevel% == 0 goto isia64 if %errorlevel% == 0 goto isia64
cl 2>&1 | grep "AMD64" >NUL cl 2>&1 | grep "AMD64" >NUL

View file

@ -22,6 +22,8 @@
# #
# #
set -e
# This shell script echoes "MSC_VER=<munged version of cl>" # This shell script echoes "MSC_VER=<munged version of cl>"
# It ignores the micro version component. # It ignores the micro version component.
# Examples: # Examples:
@ -38,17 +40,20 @@
# sh, and it has been found that sometimes `which sh` fails. # sh, and it has been found that sometimes `which sh` fails.
if [ "x$HotSpotMksHome" != "x" ]; then if [ "x$HotSpotMksHome" != "x" ]; then
MKS_HOME="$HotSpotMksHome" TOOL_DIR="$HotSpotMksHome"
else else
SH=`which sh` # HotSpotMksHome is not set so use the directory that contains "sh".
MKS_HOME=`dirname "$SH"` # This works with both MKS and Cygwin.
SH=`which sh`
TOOL_DIR=`dirname "$SH"`
fi fi
HEAD="$MKS_HOME/head" DIRNAME="$TOOL_DIR/dirname"
ECHO="$MKS_HOME/echo" HEAD="$TOOL_DIR/head"
EXPR="$MKS_HOME/expr" ECHO="$TOOL_DIR/echo"
CUT="$MKS_HOME/cut" EXPR="$TOOL_DIR/expr"
SED="$MKS_HOME/sed" CUT="$TOOL_DIR/cut"
SED="$TOOL_DIR/sed"
if [ "x$FORCE_MSC_VER" != "x" ]; then if [ "x$FORCE_MSC_VER" != "x" ]; then
echo "MSC_VER=$FORCE_MSC_VER" echo "MSC_VER=$FORCE_MSC_VER"
@ -70,7 +75,15 @@ fi
if [ "x$FORCE_LINK_VER" != "x" ]; then if [ "x$FORCE_LINK_VER" != "x" ]; then
echo "LINK_VER=$FORCE_LINK_VER" echo "LINK_VER=$FORCE_LINK_VER"
else else
LINK_VER_RAW=`link 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'` # use the "link" command that is co-located with the "cl" command
cl_cmd=`which cl`
if [ "x$cl_cmd" != "x" ]; then
link_cmd=`$DIRNAME "$cl_cmd"`/link
else
# "which" can't find "cl" so just use whichever "link" we find
link_cmd="link"
fi
LINK_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1` LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1`
LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2` LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2`
LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3` LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3`

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -377,6 +377,16 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
} }
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
__ delayed()->nop();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) { void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native----------------- //---------------slow case: call to native-----------------
__ bind(_entry); __ bind(_entry);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -143,3 +143,6 @@
static bool is_caller_save_register (LIR_Opr reg); static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r); static bool is_caller_save_register (Register r);
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return L7_opr; }

View file

@ -378,12 +378,7 @@ int LIR_Assembler::emit_exception_handler() {
int offset = code_offset(); int offset = code_offset();
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) { __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop();
}
__ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
__ delayed()->nop(); __ delayed()->nop();
debug_only(__ stop("should have gone to the caller");) debug_only(__ stop("should have gone to the caller");)
assert(code_offset() - offset <= exception_handler_size, "overflow"); assert(code_offset() - offset <= exception_handler_size, "overflow");
@ -685,29 +680,29 @@ void LIR_Assembler::align_call(LIR_Code) {
} }
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
__ call(entry, rtype); __ call(op->addr(), rtype);
// the peephole pass fills the delay slot // the peephole pass fills the delay slot
} }
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rspec = virtual_call_Relocation::spec(pc()); RelocationHolder rspec = virtual_call_Relocation::spec(pc());
__ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg); __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
__ relocate(rspec); __ relocate(rspec);
__ call(entry, relocInfo::none); __ call(op->addr(), relocInfo::none);
// the peephole pass fills the delay slot // the peephole pass fills the delay slot
} }
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
add_debug_info_for_null_check_here(info); add_debug_info_for_null_check_here(op->info());
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
if (__ is_simm13(vtable_offset) ) { if (__ is_simm13(op->vtable_offset())) {
__ ld_ptr(G3_scratch, vtable_offset, G5_method); __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
} else { } else {
// This will generate 2 instructions // This will generate 2 instructions
__ set(vtable_offset, G5_method); __ set(op->vtable_offset(), G5_method);
// ld_ptr, set_hi, set // ld_ptr, set_hi, set
__ ld_ptr(G3_scratch, G5_method, G5_method); __ ld_ptr(G3_scratch, G5_method, G5_method);
} }
@ -717,6 +712,16 @@ void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
Unimplemented();
}
// load with 32-bit displacement // load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) { int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset(); int load_offset = code_offset();
@ -1067,7 +1072,8 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
LIR_Const* c = src->as_constant_ptr(); LIR_Const* c = src->as_constant_ptr();
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_FLOAT: { case T_FLOAT:
case T_ADDRESS: {
Register src_reg = O7; Register src_reg = O7;
int value = c->as_jint_bits(); int value = c->as_jint_bits();
if (value == 0) { if (value == 0) {
@ -1123,7 +1129,8 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
} }
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_FLOAT: { case T_FLOAT:
case T_ADDRESS: {
LIR_Opr tmp = FrameMap::O7_opr; LIR_Opr tmp = FrameMap::O7_opr;
int value = c->as_jint_bits(); int value = c->as_jint_bits();
if (value == 0) { if (value == 0) {
@ -1195,6 +1202,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
switch (c->type()) { switch (c->type()) {
case T_INT: case T_INT:
case T_ADDRESS:
{ {
jint con = c->as_jint(); jint con = c->as_jint();
if (to_reg->is_single_cpu()) { if (to_reg->is_single_cpu()) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -42,17 +42,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
} }
void C1_MacroAssembler::method_exit(bool restore_frame) {
// this code must be structured this way so that the return
// instruction can be a safepoint.
if (restore_frame) {
restore();
}
retl();
delayed()->nop();
}
void C1_MacroAssembler::explicit_null_check(Register base) { void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented(); Unimplemented();
} }

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -677,7 +677,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save()); G2_thread, Oissuing_pc->after_save());
__ verify_not_null_oop(Oexception->after_save()); __ verify_not_null_oop(Oexception->after_save());
__ jmp(O0, 0); __ jmp(O0, 0);
__ delayed()->restore(); __ delayed()->restore();
@ -985,7 +985,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) { void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
Label no_deopt; Label no_deopt;
Label no_handler;
__ verify_not_null_oop(Oexception); __ verify_not_null_oop(Oexception);
@ -1003,9 +1002,14 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
// whether it had a handler or not we will deoptimize // whether it had a handler or not we will deoptimize
// by entering the deopt blob with a pending exception. // by entering the deopt blob with a pending exception.
#ifdef ASSERT
Label done;
__ tst(O0); __ tst(O0);
__ br(Assembler::zero, false, Assembler::pn, no_handler); __ br(Assembler::notZero, false, Assembler::pn, done);
__ delayed()->nop(); __ delayed()->nop();
__ stop("should have found address");
__ bind(done);
#endif
// restore the registers that were saved at the beginning and jump to the exception handler. // restore the registers that were saved at the beginning and jump to the exception handler.
restore_live_registers(sasm); restore_live_registers(sasm);
@ -1013,20 +1017,6 @@ void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_map
__ jmp(O0, 0); __ jmp(O0, 0);
__ delayed()->restore(); __ delayed()->restore();
__ bind(no_handler);
__ mov(L0, I7); // restore return address
// restore exception oop
__ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
__ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
__ restore();
AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
__ jump_to(exc, G4);
__ delayed()->nop();
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
} }

View file

@ -244,9 +244,10 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
} }
void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) { void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
mov(arg_1, O0); mov(arg_1, O0);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 1); mov(arg_2, O1);
MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
} }
#endif /* CC_INTERP */ #endif /* CC_INTERP */

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -121,7 +121,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
bool check_exception = true); bool check_exception = true);
#ifndef CC_INTERP #ifndef CC_INTERP
void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1); void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
// Generate a subtype check: branch to ok_is_subtype if sub_klass is // Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3. // a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.

View file

@ -1803,8 +1803,9 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
// to implement the UseStrictFP mode. // to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false; const bool Matcher::strict_fp_requires_explicit_rounding = false;
// Do floats take an entire double register or just half? // Are floats converted to double when stored to stack during deoptimization?
const bool Matcher::float_in_double = false; // Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }
// Do ints take an entire long register or just half? // Do ints take an entire long register or just half?
// Note that we if-def off of _LP64. // Note that we if-def off of _LP64.

View file

@ -379,7 +379,7 @@ class StubGenerator: public StubCodeGenerator {
__ save_frame(0); // compensates for compiler weakness __ save_frame(0); // compensates for compiler weakness
__ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
BLOCK_COMMENT("call exception_handler_for_return_address"); BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch); __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
__ mov(O0, handler_reg); __ mov(O0, handler_reg);
__ restore(); // compensates for compiler weakness __ restore(); // compensates for compiler weakness

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,13 @@ static bool returns_to_call_stub(address return_pc) {
enum /* platform_dependent_constants */ { enum /* platform_dependent_constants */ {
// %%%%%%%% May be able to shrink this a lot // %%%%%%%% May be able to shrink this a lot
code_size1 = 20000, // simply increase if too small (assembler will crash if too small) code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 20000 // simply increase if too small (assembler will crash if too small) code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
};
// MethodHandles adapters
enum method_handles_platform_dependent_constants {
method_handles_adapters_code_size = 5000
}; };
class Sparc { class Sparc {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -1822,7 +1822,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
__ super_call_VM_leaf(L7_thread_cache, __ super_call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
Oissuing_pc->after_save()); G2_thread, Oissuing_pc->after_save());
// The caller's SP was adjusted upon method entry to accommodate // the callee's non-argument locals. Undo that adjustment.
// the callee's non-argument locals. Undo that adjustment. // the callee's non-argument locals. Undo that adjustment.

View file

@ -8460,6 +8460,7 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
subptr(str1, result); // Restore counter subptr(str1, result); // Restore counter
shrl(str1, 1); shrl(str1, 1);
addl(cnt1, str1); addl(cnt1, str1);
decrementl(cnt1);
lea(str1, Address(result, 2)); // Reload string lea(str1, Address(result, 2)); // Reload string
// Load substr // Load substr

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -373,6 +373,14 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
} }
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry); __ bind(_entry);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -126,3 +126,6 @@
assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds"); assert(i >= 0 && i < nof_caller_save_xmm_regs, "out of bounds");
return _caller_save_xmm_regs[i]; return _caller_save_xmm_regs[i];
} }
// JSR 292
static LIR_Opr& method_handle_invoke_SP_save_opr() { return rbp_opr; }

View file

@ -436,40 +436,18 @@ int LIR_Assembler::emit_exception_handler() {
int offset = code_offset(); int offset = code_offset();
// if the method does not have an exception handler, then there is // the exception oop and pc are in rax, and rdx
// no reason to search for one
if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
// the exception oop and pc are in rax, and rdx
// no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(rax);
// search an exception handler (rax: exception oop, rdx: throwing pc)
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
// if the call returns here, then the exception handler for particular
// exception doesn't exist -> unwind activation and forward exception to caller
}
// the exception oop is in rax,
// no other registers need to be preserved, so invalidate them // no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, true, true, true); __ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception // check that there is really an exception
__ verify_not_null_oop(rax); __ verify_not_null_oop(rax);
// unlock the receiver/klass if necessary // search an exception handler (rax: exception oop, rdx: throwing pc)
// rax,: exception __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));
ciMethod* method = compilation()->method();
if (method->is_synchronized() && GenerateSynchronizationCode) { __ stop("should not reach here");
monitorexit(FrameMap::rbx_oop_opr, FrameMap::rcx_opr, SYNC_header, 0, rax);
}
// unwind activation and forward exception to caller
// rax,: exception
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
assert(code_offset() - offset <= exception_handler_size, "overflow"); assert(code_offset() - offset <= exception_handler_size, "overflow");
__ end_a_stub(); __ end_a_stub();
@ -495,8 +473,10 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset(); int offset = code_offset();
InternalAddress here(__ pc()); InternalAddress here(__ pc());
__ pushptr(here.addr()); __ pushptr(here.addr());
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(code_offset() - offset <= deopt_handler_size, "overflow"); assert(code_offset() - offset <= deopt_handler_size, "overflow");
__ end_a_stub(); __ end_a_stub();
@ -593,7 +573,7 @@ void LIR_Assembler::return_op(LIR_Opr result) {
} }
// Pop the stack before the safepoint code // Pop the stack before the safepoint code
__ leave(); __ remove_frame(initial_frame_size_in_bytes());
bool result_is_oop = result->is_valid() ? result->is_oop() : false; bool result_is_oop = result->is_valid() ? result->is_oop() : false;
@ -648,7 +628,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
LIR_Const* c = src->as_constant_ptr(); LIR_Const* c = src->as_constant_ptr();
switch (c->type()) { switch (c->type()) {
case T_INT: { case T_INT:
case T_ADDRESS: {
assert(patch_code == lir_patch_none, "no patching handled here"); assert(patch_code == lir_patch_none, "no patching handled here");
__ movl(dest->as_register(), c->as_jint()); __ movl(dest->as_register(), c->as_jint());
break; break;
@ -731,6 +712,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
switch (c->type()) { switch (c->type()) {
case T_INT: // fall through case T_INT: // fall through
case T_FLOAT: case T_FLOAT:
case T_ADDRESS:
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
break; break;
@ -766,6 +748,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
switch (type) { switch (type) {
case T_INT: // fall through case T_INT: // fall through
case T_FLOAT: case T_FLOAT:
case T_ADDRESS:
__ movl(as_Address(addr), c->as_jint_bits()); __ movl(as_Address(addr), c->as_jint_bits());
break; break;
@ -2738,6 +2721,7 @@ void LIR_Assembler::align_call(LIR_Code code) {
switch (code) { switch (code) {
case lir_static_call: case lir_static_call:
case lir_optvirtual_call: case lir_optvirtual_call:
case lir_dynamic_call:
offset += NativeCall::displacement_offset; offset += NativeCall::displacement_offset;
break; break;
case lir_icvirtual_call: case lir_icvirtual_call:
@ -2753,30 +2737,41 @@ void LIR_Assembler::align_call(LIR_Code code) {
} }
void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) { void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(entry, rtype)); __ call(AddressLiteral(op->addr(), rtype));
add_call_info(code_offset(), info); add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
} }
void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) { void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
RelocationHolder rh = virtual_call_Relocation::spec(pc()); RelocationHolder rh = virtual_call_Relocation::spec(pc());
__ movoop(IC_Klass, (jobject)Universe::non_oop_word()); __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
assert(!os::is_MP() || assert(!os::is_MP() ||
(__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
"must be aligned"); "must be aligned");
__ call(AddressLiteral(entry, rh)); __ call(AddressLiteral(op->addr(), rh));
add_call_info(code_offset(), info); add_call_info(code_offset(), op->info(), op->is_method_handle_invoke());
} }
/* Currently, vtable-dispatch is only enabled for sparc platforms */ /* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) { void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
ShouldNotReachHere(); ShouldNotReachHere();
} }
void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
__ movptr(FrameMap::method_handle_invoke_SP_save_opr()->as_register(), rsp);
}
void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
__ movptr(rsp, FrameMap::method_handle_invoke_SP_save_opr()->as_register());
}
void LIR_Assembler::emit_static_call_stub() { void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc(); address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size); address stub = __ start_a_stub(call_stub_size);
@ -2829,10 +2824,12 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
} else { } else {
unwind_id = Runtime1::handle_exception_nofpu_id; unwind_id = Runtime1::handle_exception_nofpu_id;
} }
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
} else { } else {
unwind_id = Runtime1::unwind_exception_id; // remove the activation
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
} }
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
// enough room for two byte trap // enough room for two byte trap
__ nop(); __ nop();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -317,14 +317,6 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
} }
void C1_MacroAssembler::method_exit(bool restore_frame) {
if (restore_frame) {
leave();
}
ret(0);
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes) { void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// Make sure there is enough stack space for this method's activation. // Make sure there is enough stack space for this method's activation.
// Note that we do this before doing an enter(). This matches the // Note that we do this before doing an enter(). This matches the
@ -333,7 +325,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
// between the two compilers. // between the two compilers.
generate_stack_overflow_check(frame_size_in_bytes); generate_stack_overflow_check(frame_size_in_bytes);
enter(); push(rbp);
#ifdef TIERED #ifdef TIERED
// c2 leaves fpu stack dirty. Clean it on entry // c2 leaves fpu stack dirty. Clean it on entry
if (UseSSE < 2 ) { if (UseSSE < 2 ) {
@ -344,6 +336,12 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
} }
void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
increment(rsp, frame_size_in_bytes); // Does not emit code for frame_size == 0
pop(rbp);
}
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) { void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
if (C1Breakpoint) int3(); if (C1Breakpoint) int3();
inline_cache_check(receiver, ic_klass); inline_cache_check(receiver, ic_klass);

View file

@ -1,5 +1,5 @@
/* /*
* Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -688,18 +688,21 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
oop_maps->add_gc_map(call_offset, oop_map); oop_maps->add_gc_map(call_offset, oop_map);
// rax,: handler address or NULL if no handler exists // rax,: handler address
// will be the deopt blob if nmethod was deoptimized while we looked up // will be the deopt blob if nmethod was deoptimized while we looked up
// handler regardless of whether handler existed in the nmethod. // handler regardless of whether handler existed in the nmethod.
// only rax, is valid at this time, all other registers have been destroyed by the runtime call // only rax, is valid at this time, all other registers have been destroyed by the runtime call
__ invalidate_registers(false, true, true, true, true, true); __ invalidate_registers(false, true, true, true, true, true);
#ifdef ASSERT
// Do we have an exception handler in the nmethod? // Do we have an exception handler in the nmethod?
Label no_handler;
Label done; Label done;
__ testptr(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, no_handler); __ jcc(Assembler::notZero, done);
__ stop("no handler found");
__ bind(done);
#endif
// exception handler found // exception handler found
// patch the return address -> the stub will directly return to the exception handler // patch the return address -> the stub will directly return to the exception handler
@ -712,36 +715,14 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
__ leave(); __ leave();
__ ret(0); __ ret(0);
__ bind(no_handler);
// no exception handler found in this method, so the exception is
// forwarded to the caller (using the unwind code of the nmethod)
// there is no need to restore the registers
// restore the real return address that was saved before the RT-call
__ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
__ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
// load address of JavaThread object for thread-local data
NOT_LP64(__ get_thread(thread);)
// restore exception oop into rax, (convention for unwind code)
__ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
// clear exception fields in JavaThread because they are no longer needed
// (fields must be cleared because they are processed by GC otherwise)
__ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
// pop the stub frame off
__ leave();
generate_unwind_exception(sasm);
__ stop("should not reach here");
} }
void Runtime1::generate_unwind_exception(StubAssembler *sasm) { void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// incoming parameters // incoming parameters
const Register exception_oop = rax; const Register exception_oop = rax;
// callee-saved copy of exception_oop during runtime call
const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
// other registers used in this stub // other registers used in this stub
const Register exception_pc = rdx; const Register exception_pc = rdx;
const Register handler_addr = rbx; const Register handler_addr = rbx;
@ -769,38 +750,39 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

- // leave activation of nmethod
- __ leave();
- // store return address (is on top of stack after leave)
+ // save exception_oop in callee-saved register to preserve it during runtime calls
+ __ verify_not_null_oop(exception_oop);
+ __ movptr(exception_oop_callee_saved, exception_oop);
+ NOT_LP64(__ get_thread(thread);)
+ // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));
- __ verify_oop(exception_oop);
- // save exception oop from rax, to stack before call
- __ push(exception_oop);

  // search the exception handler address of the caller (using the return address)
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
- // rax,: exception handler address of the caller
- // only rax, is valid at this time, all other registers have been destroyed by the call
- __ invalidate_registers(false, true, true, true, true, true);
+ // rax: exception handler address of the caller
+ // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
+ __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

- // restore exception oop in rax, (required convention of exception handler)
- __ pop(exception_oop);
- __ verify_oop(exception_oop);
+ // Restore exception oop to RAX (required convention of exception handler).
+ __ movptr(exception_oop, exception_oop_callee_saved);
+ // verify that there is really a valid exception in rax
+ __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

- // verify that that there is really a valid exception in rax,
- __ verify_not_null_oop(exception_oop);
+ // Restore SP from BP if the exception PC is a MethodHandle call site.
+ NOT_LP64(__ get_thread(thread);)
+ __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+ __ cmovptr(Assembler::notEqual, rsp, rbp);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
@@ -808,9 +790,9 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // all arguments on the stack when entering the
  // runtime to determine the exception handler
  // (GC happens at call site with arguments!)
- // rax,: exception oop
+ // rax: exception oop
  // rdx: throwing pc
- // rbx,: exception handler
+ // rbx: exception handler
  __ jmp(handler_addr);
}

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,13 +60,13 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
 }

 #ifdef ASSERT
-static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
+static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
                            const char* error_message) {
   // Verify that argslot lies within (rsp, rbp].
   Label L_ok, L_bad;
-  __ cmpptr(rax_argslot, rbp);
+  __ cmpptr(argslot_reg, rbp);
   __ jccb(Assembler::above, L_bad);
-  __ cmpptr(rsp, rax_argslot);
+  __ cmpptr(rsp, argslot_reg);
   __ jccb(Assembler::below, L_ok);
   __ bind(L_bad);
   __ stop(error_message);
@@ -178,22 +178,6 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
   // Now move the argslot down, to point to the opened-up space.
   __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));

-  if (TaggedStackInterpreter && arg_mask != _INSERT_NO_MASK) {
-    // The caller has specified a bitmask of tags to put into the opened space.
-    // This only works when the arg_slots value is an assembly-time constant.
-    int constant_arg_slots = arg_slots.as_constant() / stack_move_unit();
-    int tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
-    for (int slot = 0; slot < constant_arg_slots; slot++) {
-      BasicType slot_type = ((arg_mask & (1 << slot)) == 0 ? T_OBJECT : T_INT);
-      int slot_offset = Interpreter::stackElementSize() * slot;
-      Address tag_addr(rax_argslot, slot_offset + tag_offset);
-      __ movptr(tag_addr, frame::tag_for_basic_type(slot_type));
-    }
-    // Note that the new argument slots are tagged properly but contain
-    // garbage at this point. The value portions must be initialized
-    // by the caller. (Especially references!)
-  }
 }

 // Helper to remove argument slots from the stack.
@@ -206,18 +190,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 #ifdef ASSERT
-  {
-    // Verify that [argslot..argslot+size) lies within (rsp, rbp).
-    Label L_ok, L_bad;
-    __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
-    __ cmpptr(rbx_temp, rbp);
-    __ jccb(Assembler::above, L_bad);
-    __ cmpptr(rsp, rax_argslot);
-    __ jccb(Assembler::below, L_ok);
-    __ bind(L_bad);
-    __ stop("deleted argument(s) must fall within current frame");
-    __ bind(L_ok);
-  }
+  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
+  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
+  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
   if (arg_slots.is_register()) {
     Label L_ok, L_bad;
     __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
@@ -321,12 +296,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
   Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
   Address vmarg; // __ argument_address(vmargslot)

-  int tag_offset = -1;
-  if (TaggedStackInterpreter) {
-    tag_offset = Interpreter::tag_offset_in_bytes() - Interpreter::value_offset_in_bytes();
-    assert(tag_offset = wordSize, "stack grows as expected");
-  }
   const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

   if (have_entry(ek)) {
@@ -372,11 +341,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
     __ mov(rsp, rsi); // cut the stack back to where the caller started

     // Repush the arguments as if coming from the interpreter.
-    if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_INT));
     __ push(rdx_code);
-    if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
     __ push(rcx_fail);
-    if (TaggedStackInterpreter) __ push(frame::tag_for_basic_type(T_OBJECT));
     __ push(rax_want);

     Register rbx_method = rbx_temp;
@@ -397,7 +363,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
     // Do something that at least causes a valid throw from the interpreter.
     __ bind(no_method);
     __ pop(rax_want);
-    if (TaggedStackInterpreter) __ pop(rcx_fail);
     __ pop(rcx_fail);
     __ push(rax_want);
     __ push(rcx_fail);
@@ -510,18 +475,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
   case _bound_long_direct_mh:
     {
       bool direct_to_method = (ek >= _bound_ref_direct_mh);
       BasicType arg_type = T_ILLEGAL;
-      if (ek == _bound_long_mh || ek == _bound_long_direct_mh) {
-        arg_type = T_LONG;
-      } else if (ek == _bound_int_mh || ek == _bound_int_direct_mh) {
-        arg_type = T_INT;
-      } else {
-        assert(ek == _bound_ref_mh || ek == _bound_ref_direct_mh, "must be ref");
-        arg_type = T_OBJECT;
-      }
-      int arg_slots = type2size[arg_type];
-      int arg_mask = (arg_type == T_OBJECT ? _INSERT_REF_MASK :
-                      arg_slots == 1 ? _INSERT_INT_MASK : _INSERT_LONG_MASK);
+      int arg_mask = _INSERT_NO_MASK;
+      int arg_slots = -1;
+      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

       // make room for the new argument:
       __ movl(rax_argslot, rcx_bmh_vmargslot);
@@ -584,7 +541,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       Label done;
       __ movptr(rdx_temp, vmarg);
-      __ testl(rdx_temp, rdx_temp);
+      __ testptr(rdx_temp, rdx_temp);
       __ jccb(Assembler::zero, done); // no cast if null
       __ load_klass(rdx_temp, rdx_temp);
@@ -660,13 +617,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       }
       break;
     default:
-      assert(false, "");
+      ShouldNotReachHere();
     }
-    goto finish_int_conversion;
-  }

-finish_int_conversion:
-  {
+    // Do the requested conversion and store the value.
     Register rbx_vminfo = rbx_temp;
     __ movl(rbx_vminfo, rcx_amh_conversion);
     assert(CONV_VMINFO_SHIFT == 0, "preshifted");
@@ -692,7 +646,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
     __ shrl(rdx_temp /*, rcx*/);
     __ bind(done);
-    __ movl(vmarg, rdx_temp);
+    __ movl(vmarg, rdx_temp); // Store the value.
     __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
     __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
@@ -715,9 +669,14 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
     switch (ek) {
     case _adapter_opt_i2l:
       {
+#ifdef _LP64
+        __ movslq(rdx_temp, vmarg1);  // Load sign-extended
+        __ movq(vmarg1, rdx_temp);    // Store into first slot
+#else
        __ movl(rdx_temp, vmarg1);
-       __ sarl(rdx_temp, 31);  // __ extend_sign()
+       __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
        __ movl(vmarg2, rdx_temp); // store second word
+#endif
       }
       break;
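(The _adapter_opt_i2l hunk above widens an int argument in place: on LP64 a single movslq sign-extends and stores, while on 32-bit the high word is manufactured by an arithmetic right shift of the sign bit. A self-contained C++ sketch of the same two-slot trick; variable names here are illustrative, not HotSpot's:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t lo = -7;        // the int value sitting in its stack slot
      int32_t hi = lo >> 31;  // arithmetic shift replicates the sign bit,
                              // which is what sarl(reg, BitsPerInt - 1) computes
      int64_t widened = (int64_t)(((uint64_t)(uint32_t)hi << 32) | (uint32_t)lo);
      printf("%lld\n", (long long)widened);  // prints -7
      return 0;
    }
)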
     case _adapter_opt_unboxl:
@@ -727,14 +686,19 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
         int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
         assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
         __ null_check(rdx_temp, value_offset);
+#ifdef _LP64
+        __ movq(rbx_temp, Address(rdx_temp, value_offset));
+        __ movq(vmarg1, rbx_temp);
+#else
         __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
         __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
         __ movl(vmarg1, rbx_temp);
         __ movl(vmarg2, rdx_temp);
+#endif
       }
       break;
     default:
-      assert(false, "");
+      ShouldNotReachHere();
     }

     __ movptr(rcx_recv, rcx_mh_vmtarget);
@@ -768,19 +732,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
     if (ek == _adapter_opt_f2d) {
       __ fld_s(vmarg);        // load float to ST0
       __ fstp_s(vmarg);       // store single
-    } else if (!TaggedStackInterpreter) {
-      __ fld_d(vmarg);        // load double to ST0
-      __ fstp_s(vmarg);       // store single
     } else {
-      Address vmarg_tag = vmarg.plus_disp(tag_offset);
-      Address vmarg2    = vmarg.plus_disp(Interpreter::stackElementSize());
-      // vmarg2_tag does not participate in this code
-      Register rbx_tag = rbx_temp;
-      __ movl(rbx_tag, vmarg_tag);  // preserve tag
-      __ movl(rdx_temp, vmarg2);    // get second word of double
-      __ movl(vmarg_tag, rdx_temp); // align with first word
       __ fld_d(vmarg);        // load double to ST0
-      __ movl(vmarg_tag, rbx_tag);  // restore tag
       __ fstp_s(vmarg);       // store single
     }
 #endif //_LP64
@@ -812,19 +765,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
   case _adapter_opt_rot_2_up:
   case _adapter_opt_rot_2_down:
     {
-      int rotate = 0, swap_slots = 0;
-      switch ((int)ek) {
-      case _adapter_opt_swap_1:     swap_slots = 1; break;
-      case _adapter_opt_swap_2:     swap_slots = 2; break;
-      case _adapter_opt_rot_1_up:   swap_slots = 1; rotate++; break;
-      case _adapter_opt_rot_1_down: swap_slots = 1; rotate--; break;
-      case _adapter_opt_rot_2_up:   swap_slots = 2; rotate++; break;
-      case _adapter_opt_rot_2_down: swap_slots = 2; rotate--; break;
-      default: assert(false, "");
-      }
-      // the real size of the move must be doubled if TaggedStackInterpreter:
-      int swap_bytes = (int)( swap_slots * Interpreter::stackElementWords() * wordSize );
+      int swap_bytes = 0, rotate = 0;
+      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

       // 'argslot' is the position of the first argument to swap
       __ movl(rax_argslot, rcx_amh_vmargslot);
@@ -925,8 +867,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       // 'stack_move' is negative number of words to duplicate
       Register rdx_stack_move = rdx_temp;
-      __ movl(rdx_stack_move, rcx_amh_conversion);
-      __ sarl(rdx_stack_move, CONV_STACK_MOVE_SHIFT);
+      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
+      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);

       int argslot0_num = 0;
       Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
@@ -988,8 +930,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       // 'stack_move' is number of words to drop
       Register rdi_stack_move = rdi;
-      __ movl(rdi_stack_move, rcx_amh_conversion);
-      __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
+      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
       remove_arg_slots(_masm, rdi_stack_move,
                        rax_argslot, rbx_temp, rdx_temp);
@@ -1014,11 +956,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
   case _adapter_opt_spread_more:
     {
       // spread an array out into a group of arguments
-      int length_constant = -1;
-      switch (ek) {
-      case _adapter_opt_spread_0: length_constant = 0; break;
-      case _adapter_opt_spread_1: length_constant = 1; break;
-      }
+      int length_constant = get_ek_adapter_opt_spread_info(ek);

       // find the address of the array argument
       __ movl(rax_argslot, rcx_amh_vmargslot);
@@ -1079,8 +1017,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
       __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
       // 'stack_move' is negative number of words to insert
       Register rdi_stack_move = rdi;
-      __ movl(rdi_stack_move, rcx_amh_conversion);
-      __ sarl(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
+      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
+      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
       Register rsi_temp = rsi_array;  // spill this
       insert_arg_slots(_masm, rdi_stack_move, -1,
                        rax_argslot, rbx_temp, rsi_temp);
@@ -1114,10 +1052,6 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
         __ movptr(rbx_temp, Address(rsi_source, 0));
         __ movptr(Address(rax_argslot, 0), rbx_temp);
         __ addptr(rsi_source, type2aelembytes(elem_type));
-        if (TaggedStackInterpreter) {
-          __ movptr(Address(rax_argslot, tag_offset),
-                    frame::tag_for_basic_type(elem_type));
-        }
         __ addptr(rax_argslot, Interpreter::stackElementSize());
         __ cmpptr(rax_argslot, rdx_argslot_limit);
         __ jccb(Assembler::less, loop);
@@ -1131,11 +1065,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
         __ movptr(rbx_temp, Address(rsi_array, elem_offset));
         __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
         elem_offset += type2aelembytes(elem_type);
-        if (TaggedStackInterpreter) {
-          __ movptr(Address(rax_argslot, slot_offset + tag_offset),
-                    frame::tag_for_basic_type(elem_type));
-        }
         slot_offset += Interpreter::stackElementSize();
       }
     }

View file

@@ -369,7 +369,7 @@ class StubGenerator: public StubCodeGenerator {
   // The pending exception in Thread is converted into a Java-level exception.
   //
   // Contract with Java-level exception handlers:
-  // rax,: exception
+  // rax: exception
   // rdx: throwing pc
   //
   // NOTE: At entry of this stub, exception-pc must be on stack !!
@@ -377,6 +377,12 @@ class StubGenerator: public StubCodeGenerator {
   address generate_forward_exception() {
     StubCodeMark mark(this, "StubRoutines", "forward exception");
     address start = __ pc();
+    const Register thread = rcx;
+    // other registers used in this stub
+    const Register exception_oop = rax;
+    const Register handler_addr = rbx;
+    const Register exception_pc = rdx;

     // Upon entry, the sp points to the return address returning into Java
     // (interpreted or compiled) code; i.e., the return address becomes the
@@ -389,8 +395,8 @@ class StubGenerator: public StubCodeGenerator {
 #ifdef ASSERT
     // make sure this code is only executed if there is a pending exception
     { Label L;
-      __ get_thread(rcx);
-      __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+      __ get_thread(thread);
+      __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
       __ jcc(Assembler::notEqual, L);
       __ stop("StubRoutines::forward exception: no pending exception (1)");
       __ bind(L);
@@ -398,33 +404,40 @@ class StubGenerator: public StubCodeGenerator {
 #endif

     // compute exception handler into rbx,
-    __ movptr(rax, Address(rsp, 0));
+    __ get_thread(thread);
+    __ movptr(exception_pc, Address(rsp, 0));
     BLOCK_COMMENT("call exception_handler_for_return_address");
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax);
-    __ mov(rbx, rax);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
+    __ mov(handler_addr, rax);

-    // setup rax, & rdx, remove return address & clear pending exception
-    __ get_thread(rcx);
-    __ pop(rdx);
-    __ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
-    __ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
+    // setup rax & rdx, remove return address & clear pending exception
+    __ get_thread(thread);
+    __ pop(exception_pc);
+    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
+    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

 #ifdef ASSERT
     // make sure exception is set
     { Label L;
-      __ testptr(rax, rax);
+      __ testptr(exception_oop, exception_oop);
       __ jcc(Assembler::notEqual, L);
       __ stop("StubRoutines::forward exception: no pending exception (2)");
       __ bind(L);
     }
 #endif

+    // Verify that there is really a valid exception in RAX.
+    __ verify_oop(exception_oop);
+
+    // Restore SP from BP if the exception PC is a MethodHandle call site.
+    __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+    __ cmovptr(Assembler::notEqual, rsp, rbp);
+
     // continue at exception handler (return address removed)
-    // rax,: exception
-    // rbx,: exception handler
+    // rax: exception
+    // rbx: exception handler
     // rdx: throwing pc
-    __ verify_oop(rax);
-    __ jmp(rbx);
+    __ jmp(handler_addr);

     return start;
   }
@@ -2263,16 +2276,6 @@ class StubGenerator: public StubCodeGenerator {
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();

-    // generic method handle stubs
-    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
-      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
-           ek < MethodHandles::_EK_LIMIT;
-           ek = MethodHandles::EntryKind(1 + (int)ek)) {
-        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-        MethodHandles::generate_method_handle_stub(_masm, ek);
-      }
-    }
     generate_math_stubs();
   }
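(The loop deleted above — the adapters are generated elsewhere after this change — also shows the stock C++ idiom for stepping through a dense enum, since ++ is not defined for plain enum types. A minimal standalone version with made-up kind names:

    #include <cstdio>

    enum EntryKind { EK_FIRST = 0, EK_BOUND = 0, EK_SWAP, EK_SPREAD, EK_LIMIT };

    int main() {
      // Widen to int, add one, cast back -- the same stepping trick as the
      // removed stub-generation loop.
      for (EntryKind ek = EK_FIRST; ek < EK_LIMIT; ek = EntryKind(1 + (int)ek)) {
        printf("would generate stub for kind %d\n", (int)ek);
      }
      return 0;
    }
)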

View file

@@ -466,7 +466,7 @@ class StubGenerator: public StubCodeGenerator {
     BLOCK_COMMENT("call exception_handler_for_return_address");
     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                     SharedRuntime::exception_handler_for_return_address),
-                    c_rarg0);
+                    r15_thread, c_rarg0);
     __ mov(rbx, rax);

     // setup rax & rdx, remove return address & clear pending exception
@@ -3009,16 +3009,6 @@ class StubGenerator: public StubCodeGenerator {
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();

-    // generic method handle stubs
-    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
-      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
-           ek < MethodHandles::_EK_LIMIT;
-           ek = MethodHandles::EntryKind(1 + (int)ek)) {
-        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
-        MethodHandles::generate_method_handle_stub(_masm, ek);
-      }
-    }
     generate_math_stubs();
   }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,11 @@ enum platform_dependent_constants {
   code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
 };

+// MethodHandles adapters
+enum method_handles_platform_dependent_constants {
+  method_handles_adapters_code_size = 5000
+};
+
 class x86 {
  friend class StubGenerator;
  friend class VMStructs;

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,12 +28,14 @@
 static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }

-enum platform_dependent_constants
-{
-  code_size1 = 19000, // simply increase if too small (assembler will
-                      // crash if too small)
-  code_size2 = 22000  // simply increase if too small (assembler will
-                      // crash if too small)
+enum platform_dependent_constants {
+  code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
+  code_size2 = 22000  // simply increase if too small (assembler will crash if too small)
+};
+
+// MethodHandles adapters
+enum method_handles_platform_dependent_constants {
+  method_handles_adapters_code_size = 13000
 };

 class x86 {

View file

@@ -1550,6 +1550,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
 void TemplateInterpreterGenerator::generate_throw_exception() {
   // Entry point in previous activation (i.e., if the caller was interpreted)
   Interpreter::_rethrow_exception_entry = __ pc();
+  const Register thread = rcx;

   // Restore sp to interpreter_frame_last_sp even though we are going
   // to empty the expression stack for the exception processing.
@@ -1598,10 +1599,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   // Set the popframe_processing bit in pending_popframe_condition indicating that we are
   // currently handling popframe, so that call_VMs that may happen later do not trigger new
   // popframe handling cycles.
-  __ get_thread(rcx);
-  __ movl(rdx, Address(rcx, JavaThread::popframe_condition_offset()));
+  __ get_thread(thread);
+  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
   __ orl(rdx, JavaThread::popframe_processing_bit);
-  __ movl(Address(rcx, JavaThread::popframe_condition_offset()), rdx);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

   {
     // Check to see whether we are returning to a deoptimized frame.
@@ -1629,8 +1630,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
     __ subptr(rdi, rax);
     __ addptr(rdi, wordSize);
     // Save these arguments
-    __ get_thread(rcx);
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
+    __ get_thread(thread);
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), thread, rax, rdi);

     __ remove_activation(vtos, rdx,
                          /* throw_monitor_exception */ false,
@@ -1638,8 +1639,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
                          /* notify_jvmdi */ false);

     // Inform deoptimization that it is responsible for restoring these arguments
-    __ get_thread(rcx);
-    __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);
+    __ get_thread(thread);
+    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_force_deopt_reexecution_bit);

     // Continue in deoptimization handler
     __ jmp(rdx);
@@ -1665,12 +1666,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   // expression stack if necessary.
   __ mov(rax, rsp);
   __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ get_thread(rcx);
+  __ get_thread(thread);
   // PC must point into interpreter here
-  __ set_last_Java_frame(rcx, noreg, rbp, __ pc());
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), rcx, rax, rbx);
-  __ get_thread(rcx);
-  __ reset_last_Java_frame(rcx, true, true);
+  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
+  __ get_thread(thread);
+  __ reset_last_Java_frame(thread, true, true);
   // Restore the last_sp and null it out
   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
@@ -1684,8 +1685,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   }

   // Clear the popframe condition flag
-  __ get_thread(rcx);
-  __ movl(Address(rcx, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
+  __ get_thread(thread);
+  __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
   __ dispatch_next(vtos);
   // end of PopFrame support
@@ -1694,27 +1695,27 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   // preserve exception over this code sequence
   __ pop_ptr(rax);
-  __ get_thread(rcx);
-  __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
+  __ get_thread(thread);
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
   // remove the activation (without doing throws on illegalMonitorExceptions)
   __ remove_activation(vtos, rdx, false, true, false);
   // restore exception
-  __ get_thread(rcx);
-  __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
+  __ get_thread(thread);
+  __ movptr(rax, Address(thread, JavaThread::vm_result_offset()));
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
   __ verify_oop(rax);
   // In between activations - previous activation type unknown yet
   // compute continuation point - the continuation point expects
   // the following registers set up:
   //
-  // rax,: exception
+  // rax: exception
   // rdx: return address/pc that threw exception
   // rsp: expression stack of caller
-  // rbp,: rbp, of caller
+  // rbp: rbp, of caller
   __ push(rax);                                  // save exception
   __ push(rdx);                                  // save return address
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, rdx);
   __ mov(rbx, rax);                              // save exception handler
   __ pop(rdx);                                   // restore return address
   __ pop(rax);                                   // restore exception
@@ -1728,6 +1729,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
 //
 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
   address entry = __ pc();
+  const Register thread = rcx;

   __ restore_bcp();
   __ restore_locals();
@@ -1735,8 +1737,8 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
   __ empty_FPU_stack();
   __ load_earlyret_value(state);

-  __ get_thread(rcx);
-  __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
+  __ get_thread(thread);
+  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
   const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

   // Clear the earlyret state

View file

@@ -1741,7 +1741,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   __ push(rdx);                                  // save return address
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
-                        rdx);
+                        r15_thread, rdx);
   __ mov(rbx, rax);                              // save exception handler
   __ pop(rdx);                                   // restore return address
   __ pop(rax);                                   // restore exception

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2915,12 +2915,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
     __ andl(recv, 0xFF);
     // recv count is 0 based?
     Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
-    if (is_invokedynamic) {
-      __ lea(recv, recv_addr);
-    } else {
-      __ movptr(recv, recv_addr);
-      __ verify_oop(recv);
-    }
+    __ movptr(recv, recv_addr);
+    __ verify_oop(recv);
   }

   // do null check if needed

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2860,12 +2860,8 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
     Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
-    if (is_invokedynamic) {
-      __ lea(recv, recv_addr);
-    } else {
-      __ movptr(recv, recv_addr);
-      __ verify_oop(recv);
-    }
+    __ movptr(recv, recv_addr);
+    __ verify_oop(recv);
   }

   // do null check if needed

View file

@@ -1444,8 +1444,10 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = true;

-// Do floats take an entire double register or just half?
-const bool Matcher::float_in_double = true;
+// Are floats converted to double when stored to stack during deoptimization?
+// On x32 it is stored with conversion only when FPU is used for floats.
+bool Matcher::float_in_double() { return (UseSSE == 0); }

 // Do ints take an entire long register or just half?
 const bool Matcher::int_in_long = false;

View file

@@ -2074,8 +2074,10 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
 // implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = true;

-// Do floats take an entire double register or just half?
-const bool Matcher::float_in_double = true;
+// Are floats converted to double when stored to stack during deoptimization?
+// On x64 it is stored without conversion so we can use normal access.
+bool Matcher::float_in_double() { return false; }

 // Do ints take an entire long register or just half?
 const bool Matcher::int_in_long = true;

View file

@@ -22,6 +22,8 @@
  *
  */

+# define __STDC_FORMAT_MACROS
+
 // do not include precompiled header file
 # include "incls/_os_linux.cpp.incl"
@@ -53,6 +55,8 @@
 # include <sys/ipc.h>
 # include <sys/shm.h>
 # include <link.h>
+# include <stdint.h>
+# include <inttypes.h>

 #define MAX_PATH    (2 * K)
@@ -2492,6 +2496,91 @@ bool os::uncommit_memory(char* addr, size_t size) {
                 != MAP_FAILED;
 }
+// Linux uses a growable mapping for the stack, and if the mapping for
+// the stack guard pages is not removed when we detach a thread the
+// stack cannot grow beyond the pages where the stack guard was
+// mapped.  If at some point later in the process the stack expands to
+// that point, the Linux kernel cannot expand the stack any further
+// because the guard pages are in the way, and a segfault occurs.
+//
+// However, it's essential not to split the stack region by unmapping
+// a region (leaving a hole) that's already part of the stack mapping,
+// so if the stack mapping has already grown beyond the guard pages at
+// the time we create them, we have to truncate the stack mapping.
+// So, we need to know the extent of the stack mapping when
+// create_stack_guard_pages() is called.
+
+// Find the bounds of the stack mapping.  Return true for success.
+//
+// We only need this for stacks that are growable: at the time of
+// writing thread stacks don't use growable mappings (i.e. those
+// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
+// only applies to the main thread.
+static bool
+get_stack_bounds(uintptr_t *bottom, uintptr_t *top)
+{
+  FILE *f = fopen("/proc/self/maps", "r");
+  if (f == NULL)
+    return false;
+
+  while (!feof(f)) {
+    size_t dummy;
+    char *str = NULL;
+    ssize_t len = getline(&str, &dummy, f);
+    if (len == -1) {
+      fclose(f);
+      return false;
+    }
+
+    if (len > 0 && str[len-1] == '\n') {
+      str[len-1] = 0;
+      len--;
+    }
+
+    static const char *stack_str = "[stack]";
+    if (len > (ssize_t)strlen(stack_str)
+        && (strcmp(str + len - strlen(stack_str), stack_str) == 0)) {
+      if (sscanf(str, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
+        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);
+        if (sp >= *bottom && sp <= *top) {
+          free(str);
+          fclose(f);
+          return true;
+        }
+      }
+    }
+    free(str);
+  }
+
+  fclose(f);
+  return false;
+}
+
+// If the (growable) stack mapping already extends beyond the point
+// where we're going to put our guard pages, truncate the mapping at
+// that point by munmap()ping it.  This ensures that when we later
+// munmap() the guard pages we don't leave a hole in the stack
+// mapping.
+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  uintptr_t stack_extent, stack_base;
+  if (get_stack_bounds(&stack_extent, &stack_base)) {
+    if (stack_extent < (uintptr_t)addr)
+      ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
+  }
+
+  return os::commit_memory(addr, size);
+}
+
+// If this is a growable mapping, remove the guard pages entirely by
+// munmap()ping them.  If not, just call uncommit_memory().
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  uintptr_t stack_extent, stack_base;
+  if (get_stack_bounds(&stack_extent, &stack_base)) {
+    return ::munmap(addr, size) == 0;
+  }
+
+  return os::uncommit_memory(addr, size);
+}
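(get_stack_bounds() above is plain text parsing of /proc/self/maps. For readers who want to poke at the mechanism outside the VM, a self-contained sketch of the same scan; Linux/glibc assumed, error handling trimmed:

    #include <sys/types.h>
    #include <cinttypes>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
      FILE* f = fopen("/proc/self/maps", "r");
      if (f == NULL) return 1;
      char* line = NULL;
      size_t cap = 0;
      ssize_t len;
      while ((len = getline(&line, &cap, f)) != -1) {
        // Mapping lines look like "start-end perms offset dev inode path".
        if (strstr(line, "[stack]") != NULL) {
          uintptr_t bottom = 0, top = 0;
          if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &bottom, &top) == 2)
            printf("stack mapping: %" PRIxPTR "-%" PRIxPTR "\n", bottom, top);
          break;
        }
      }
      free(line);
      fclose(f);
      return 0;
    }
)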

 static address _highest_vm_reserved_address = NULL;

 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory

View file

@@ -25,9 +25,20 @@
 provider hotspot {
   probe class__loaded(char*, uintptr_t, void*, uintptr_t);
   probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__initialization__required(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__error(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__end(char*, uintptr_t, void*, intptr_t, int);
   probe vm__init__begin();
   probe vm__init__end();
   probe vm__shutdown();
+  probe vmops__request(char*, uintptr_t, int);
+  probe vmops__begin(char*, uintptr_t, int);
+  probe vmops__end(char*, uintptr_t, int);
   probe gc__begin(uintptr_t);
   probe gc__end();
   probe mem__pool__gc__begin(
@@ -38,6 +49,12 @@ provider hotspot {
     uintptr_t, uintptr_t, uintptr_t, uintptr_t);
   probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
   probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__sleep__begin(long long);
+  probe thread__sleep__end(int);
+  probe thread__yield();
+  probe thread__park__begin(uintptr_t, int, long long);
+  probe thread__park__end(uintptr_t);
+  probe thread__unpark(uintptr_t);
   probe method__compile__begin(
     char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t);
   probe method__compile__end(

View file

@@ -668,13 +668,18 @@ jint AttachListener::pd_set_flag(AttachOperation* op, outputStream* out) {
     }
   }

-  if (strcmp(name, "ExtendedDTraceProbes") != 0) {
-    out->print_cr("flag '%s' cannot be changed", name);
-    return JNI_ERR;
+  if (strcmp(name, "ExtendedDTraceProbes") == 0) {
+    DTrace::set_extended_dprobes(flag);
+    return JNI_OK;
   }

-  DTrace::set_extended_dprobes(flag);
-  return JNI_OK;
+  if (strcmp(name, "DTraceMonitorProbes") == 0) {
+    DTrace::set_monitor_dprobes(flag);
+    return JNI_OK;
+  }
+
+  out->print_cr("flag '%s' cannot be changed", name);
+  return JNI_ERR;
 }

 void AttachListener::pd_detachall() {

View file

@@ -2698,6 +2698,14 @@ void os::free_memory(char* addr, size_t bytes) {
   }
 }

+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  return os::commit_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::uncommit_memory(addr, size);
+}
+
 // Change the page size in a given range.
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");

View file

@@ -2803,6 +2803,14 @@ bool os::release_memory(char* addr, size_t bytes) {
   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
 }

+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  return os::commit_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::uncommit_memory(addr, size);
+}
+
 // Set protections specified
 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                         bool is_committed) {

View file

@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,10 @@
 //
 define_pd_global(bool, DontYieldALot,  false);
-#ifdef _LP64
 define_pd_global(intx, ThreadStackSize, 1536);
+#ifdef _LP64
 define_pd_global(intx, VMThreadStackSize, 1024);
 #else
-define_pd_global(intx, ThreadStackSize, 1024);
 define_pd_global(intx, VMThreadStackSize, 512);
 #endif // _LP64

 define_pd_global(intx, SurvivorRatio, 8);

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,11 +222,15 @@ void Canonicalizer::do_ArrayLength (ArrayLength* x) {
     }
   } else {
     LoadField* lf = x->array()->as_LoadField();
-    if (lf != NULL && lf->field()->is_constant()) {
-      ciObject* c = lf->field()->constant_value().as_object();
-      if (c->is_array()) {
-        ciArray* array = (ciArray*) c;
-        set_constant(array->length());
+    if (lf != NULL) {
+      ciField* field = lf->field();
+      if (field->is_constant() && field->is_static()) {
+        // final static field
+        ciObject* c = field->constant_value().as_object();
+        if (c->is_array()) {
+          ciArray* array = (ciArray*) c;
+          set_constant(array->length());
+        }
       }
     }
   }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -415,6 +415,28 @@ class PatchingStub: public CodeStub {
 };

+//------------------------------------------------------------------------------
+// DeoptimizeStub
+//
+class DeoptimizeStub : public CodeStub {
+ private:
+  CodeEmitInfo* _info;
+
+ public:
+  DeoptimizeStub(CodeEmitInfo* info) : _info(new CodeEmitInfo(info)) {}
+
+  virtual void emit_code(LIR_Assembler* e);
+  virtual CodeEmitInfo* info() const           { return _info; }
+  virtual bool is_exception_throw_stub() const { return true; }
+  virtual void visit(LIR_OpVisitState* visitor) {
+    visitor->do_slow_case(_info);
+  }
+#ifndef PRODUCT
+  virtual void print_name(outputStream* out) const { out->print("DeoptimizeStub"); }
+#endif // PRODUCT
+};
+
 class SimpleExceptionStub: public CodeStub {
  private:
   LIR_Opr _obj;

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1524,18 +1524,14 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
     code = Bytecodes::_invokespecial;
   }

-  if (code == Bytecodes::_invokedynamic) {
-    BAILOUT("invokedynamic NYI"); // FIXME
-    return;
-  }
-
   // NEEDS_CLEANUP
   // I've added the target->is_loaded() test below but I don't really understand
   // how klass->is_loaded() can be true and yet target->is_loaded() is false.
   // this happened while running the JCK invokevirtual tests under doit. TKR
   ciMethod* cha_monomorphic_target = NULL;
   ciMethod* exact_target = NULL;
-  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded()) {
+  if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
+      !target->is_method_handle_invoke()) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1681,11 +1677,20 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   CHECK_BAILOUT();

   // inlining not successful => standard invoke
-  bool is_static = code == Bytecodes::_invokestatic;
-  ValueType* result_type = as_ValueType(target->return_type());
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
-  Value recv = is_static ? NULL : apop();
   bool is_loaded = target->is_loaded();
+  bool has_receiver =
+    code == Bytecodes::_invokespecial   ||
+    code == Bytecodes::_invokevirtual   ||
+    code == Bytecodes::_invokeinterface;
+  bool is_invokedynamic = code == Bytecodes::_invokedynamic;
+  ValueType* result_type = as_ValueType(target->return_type());
+
+  // We require the debug info to be the "state before" because
+  // invokedynamics may deoptimize.
+  ValueStack* state_before = is_invokedynamic ? state()->copy() : NULL;
+
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Value recv = has_receiver ? apop() : NULL;
   int vtable_index = methodOopDesc::invalid_vtable_index;

 #ifdef SPARC
@@ -1723,7 +1728,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
     profile_call(recv, target_klass);
   }

-  Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target);
+  Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
   // push result
   append_split(result);
@@ -2862,20 +2867,18 @@ GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
   _initial_state = state_at_entry();
   start_block->merge(_initial_state);

-  BlockBegin* sync_handler = NULL;
-  if (method()->is_synchronized() || _compilation->env()->dtrace_method_probes()) {
-    // setup an exception handler to do the unlocking and/or notification
-    sync_handler = new BlockBegin(-1);
-    sync_handler->set(BlockBegin::exception_entry_flag);
-    sync_handler->set(BlockBegin::is_on_work_list_flag);
-    sync_handler->set(BlockBegin::default_exception_handler_flag);
+  // setup an exception handler to do the unlocking and/or
+  // notification and unwind the frame.
+  BlockBegin* sync_handler = new BlockBegin(-1);
+  sync_handler->set(BlockBegin::exception_entry_flag);
+  sync_handler->set(BlockBegin::is_on_work_list_flag);
+  sync_handler->set(BlockBegin::default_exception_handler_flag);

   ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
   XHandler* h = new XHandler(desc);
   h->set_entry_block(sync_handler);
   scope_data()->xhandlers()->append(h);
   scope_data()->set_has_handler();
-  }

   // complete graph
   _vmap = new ValueMap();

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,10 +259,10 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, bool lock_stack_only)
 }

-void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
+void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke) {
   // record the safepoint before recording the debug info for enclosing scopes
   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
-  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/);
+  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, is_method_handle_invoke);
   recorder->end_safepoint(pc_offset);
 }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -242,7 +242,7 @@ class IRScopeDebugInfo: public CompilationResourceObj {
   // Whether we should reexecute this bytecode for deopt
   bool should_reexecute();

-  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost) {
+  void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool topmost, bool is_method_handle_invoke = false) {
     if (caller() != NULL) {
       // Order is significant: Must record caller first.
       caller()->record_debug_info(recorder, pc_offset, false/*topmost*/);
@@ -252,7 +252,6 @@ class IRScopeDebugInfo: public CompilationResourceObj {
     DebugToken* monvals = recorder->create_monitor_values(monitors());
     // reexecute allowed only for the topmost frame
     bool reexecute = topmost ? should_reexecute() : false;
-    bool is_method_handle_invoke = false;
bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis. bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals); recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, return_oop, locvals, expvals, monvals);
} }
@ -303,7 +302,7 @@ class CodeEmitInfo: public CompilationResourceObj {
int bci() const { return _bci; } int bci() const { return _bci; }
void add_register_oop(LIR_Opr opr); void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset); void record_debug_info(DebugInformationRecorder* recorder, int pc_offset, bool is_method_handle_invoke = false);
CodeEmitInfo* next() const { return _next; } CodeEmitInfo* next() const { return _next; }
void set_next(CodeEmitInfo* next) { _next = next; } void set_next(CodeEmitInfo* next) { _next = next; }

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -334,13 +334,14 @@ void Intrinsic::state_values_do(void f(Value*)) {
 Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
-               int vtable_index, ciMethod* target)
+               int vtable_index, ciMethod* target, ValueStack* state_before)
   : StateSplit(result_type)
   , _code(code)
   , _recv(recv)
   , _args(args)
   , _vtable_index(vtable_index)
   , _target(target)
+  , _state_before(state_before)
 {
   set_flag(TargetIsLoadedFlag, target->is_loaded());
   set_flag(TargetIsFinalFlag,  target_is_loaded() && target->is_final_method());
@@ -355,6 +356,9 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
   _signature = new BasicTypeList(number_of_arguments() + (has_receiver() ? 1 : 0));
   if (has_receiver()) {
     _signature->append(as_BasicType(receiver()->type()));
+  } else if (is_invokedynamic()) {
+    // Add the synthetic MethodHandle argument to the signature.
+    _signature->append(T_OBJECT);
   }
   for (int i = 0; i < number_of_arguments(); i++) {
     ValueType* t = argument_at(i)->type();
@@ -364,6 +368,13 @@ Invoke::Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values*
 }

+void Invoke::state_values_do(void f(Value*)) {
+  StateSplit::state_values_do(f);
+  if (state_before() != NULL) state_before()->values_do(f);
+  if (state() != NULL)        state()->values_do(f);
+}

 // Implementation of Constant

 intx Constant::hash() const {
   if (_state == NULL) {

@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1134,17 +1134,18 @@ BASE(StateSplit, Instruction)
 LEAF(Invoke, StateSplit)
  private:
   Bytecodes::Code _code;
   Value           _recv;
   Values*         _args;
   BasicTypeList*  _signature;
   int             _vtable_index;
   ciMethod*       _target;
+  ValueStack*     _state_before;  // Required for deoptimization.

  public:
   // creation
   Invoke(Bytecodes::Code code, ValueType* result_type, Value recv, Values* args,
-         int vtable_index, ciMethod* target);
+         int vtable_index, ciMethod* target, ValueStack* state_before);

   // accessors
   Bytecodes::Code code() const { return _code; }
@@ -1155,6 +1156,7 @@ LEAF(Invoke, StateSplit)
   int vtable_index() const { return _vtable_index; }
   BasicTypeList* signature() const { return _signature; }
   ciMethod* target() const { return _target; }
+  ValueStack* state_before() const { return _state_before; }

   // Returns false if target is not loaded
   bool target_is_final() const { return check_flag(TargetIsFinalFlag); }
@@ -1162,6 +1164,9 @@ LEAF(Invoke, StateSplit)
   // Returns false if target is not loaded
   bool target_is_strictfp() const { return check_flag(TargetIsStrictfpFlag); }

+  // JSR 292 support
+  bool is_invokedynamic() const { return code() == Bytecodes::_invokedynamic; }

   // generic
   virtual bool can_trap() const { return true; }
   virtual void input_values_do(void f(Value*)) {
@@ -1169,6 +1174,7 @@ LEAF(Invoke, StateSplit)
     if (has_receiver()) f(&_recv);
     for (int i = 0; i < _args->length(); i++) f(_args->adr_at(i));
   }
+  virtual void state_values_do(void f(Value*));
 };

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
       return LIR_OprFact::oopConst(type->as_ObjectType()->encoding());
     }
   }
-  case addressTag: return LIR_OprFact::intConst(type->as_AddressConstant()->value());
+  case addressTag: return LIR_OprFact::addressConst(type->as_AddressConstant()->value());
   case intTag    : return LIR_OprFact::intConst(type->as_IntConstant()->value());
   case floatTag  : return LIR_OprFact::floatConst(type->as_FloatConstant()->value());
   case longTag   : return LIR_OprFact::longConst(type->as_LongConstant()->value());
@@ -89,7 +89,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
 LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
   switch (type->tag()) {
     case objectTag: return LIR_OprFact::oopConst(NULL);
-    case addressTag:
+    case addressTag:return LIR_OprFact::addressConst(0);
     case intTag:    return LIR_OprFact::intConst(0);
     case floatTag:  return LIR_OprFact::floatConst(0.0);
     case longTag:   return LIR_OprFact::longConst(0);
@@ -689,9 +689,10 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
     case lir_static_call:
     case lir_optvirtual_call:
     case lir_icvirtual_call:
-    case lir_virtual_call: {
-      assert(op->as_OpJavaCall() != NULL, "must be");
-      LIR_OpJavaCall* opJavaCall = (LIR_OpJavaCall*)op;
+    case lir_virtual_call:
+    case lir_dynamic_call: {
+      LIR_OpJavaCall* opJavaCall = op->as_OpJavaCall();
+      assert(opJavaCall != NULL, "must be");

       if (opJavaCall->_receiver->is_valid()) do_input(opJavaCall->_receiver);
@@ -704,6 +705,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
       }

       if (opJavaCall->_info) do_info(opJavaCall->_info);
+      if (opJavaCall->is_method_handle_invoke()) do_temp(FrameMap::method_handle_invoke_SP_save_opr());
       do_call();
       if (opJavaCall->_result->is_valid()) do_output(opJavaCall->_result);
@@ -1410,6 +1412,7 @@ void LIR_OprDesc::print(outputStream* out) const {
 // LIR_Address

 void LIR_Const::print_value_on(outputStream* out) const {
   switch (type()) {
+    case T_ADDRESS:out->print("address:%d",as_jint()); break;
     case T_INT:    out->print("int:%d",   as_jint());    break;
     case T_LONG:   out->print("lng:%lld", as_jlong());   break;
     case T_FLOAT:  out->print("flt:%f",   as_jfloat());  break;
@@ -1590,6 +1593,7 @@ const char * LIR_Op::name() const {
      case lir_optvirtual_call:       s = "optvirtual";    break;
      case lir_icvirtual_call:        s = "icvirtual";     break;
      case lir_virtual_call:          s = "virtual";       break;
+     case lir_dynamic_call:          s = "dynamic";       break;
      // LIR_OpArrayCopy
      case lir_arraycopy:             s = "arraycopy";     break;
      // LIR_OpLock

@@ -85,9 +85,10 @@ class LIR_Const: public LIR_OprPtr {
   void type_check(BasicType t) const   { assert(type() == t, "type check"); }
   void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); }
+  void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }

  public:
-  LIR_Const(jint i)    { _value.set_type(T_INT);    _value.set_jint(i); }
+  LIR_Const(jint i, bool is_address=false) { _value.set_type(is_address ? T_ADDRESS : T_INT); _value.set_jint(i); }
   LIR_Const(jlong l)   { _value.set_type(T_LONG);   _value.set_jlong(l); }
   LIR_Const(jfloat f)  { _value.set_type(T_FLOAT);  _value.set_jfloat(f); }
   LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); }
@@ -105,7 +106,7 @@ class LIR_Const: public LIR_OprPtr {
   virtual BasicType type()       const { return _value.get_type(); }
   virtual LIR_Const* as_constant()     { return this; }

-  jint      as_jint()    const { type_check(T_INT   ); return _value.get_jint(); }
+  jint      as_jint()    const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
   jlong     as_jlong()   const { type_check(T_LONG  ); return _value.get_jlong(); }
   jfloat    as_jfloat()  const { type_check(T_FLOAT ); return _value.get_jfloat(); }
   jdouble   as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); }
@@ -120,7 +121,7 @@ class LIR_Const: public LIR_OprPtr {
 #endif

-  jint      as_jint_bits() const { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
+  jint      as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
   jint      as_jint_lo_bits() const {
     if (type() == T_DOUBLE) {
       return low(jlong_cast(_value.get_jdouble()));
@@ -718,6 +719,7 @@ class LIR_OprFact: public AllStatic {
   static LIR_Opr intptrConst(void* p)    { return (LIR_Opr)(new LIR_Const(p)); }
   static LIR_Opr intptrConst(intptr_t v) { return (LIR_Opr)(new LIR_Const((void*)v)); }
   static LIR_Opr illegal()               { return (LIR_Opr)-1; }
+  static LIR_Opr addressConst(jint i)    { return (LIR_Opr)(new LIR_Const(i, true)); }

   static LIR_Opr value_type(ValueType* type);
   static LIR_Opr dummy_value_type(ValueType* type);
@@ -840,6 +842,7 @@ enum LIR_Code {
       , lir_optvirtual_call
       , lir_icvirtual_call
       , lir_virtual_call
+      , lir_dynamic_call
   , end_opJavaCall
   , begin_opArrayCopy
       , lir_arraycopy
@@ -1052,6 +1055,16 @@ class LIR_OpJavaCall: public LIR_OpCall {
   LIR_Opr receiver() const { return _receiver; }
   ciMethod* method() const { return _method;   }

+  // JSR 292 support.
+  bool is_invokedynamic() const { return code() == lir_dynamic_call; }
+  bool is_method_handle_invoke() const {
+    return
+      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
+      ||
+      (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
+       method()->name()           == ciSymbol::invoke_name());
+  }

   intptr_t vtable_offset() const {
     assert(_code == lir_virtual_call, "only have vtable for real vcall");
     return (intptr_t) addr();
@@ -1766,6 +1779,10 @@ class LIR_List: public CompilationResourceObj {
                     intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
     append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
   }
+  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
+                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
+    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
+  }

   void get_thread(LIR_Opr result)   { append(new LIR_Op0(lir_get_thread, result)); }
   void word_align()                 { append(new LIR_Op0(lir_word_align)); }
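(Aside: the LIR_Const change above piggybacks T_ADDRESS constants on the jint constructor via a flag, which is why as_jint() and as_jint_bits() now accept the extra tag. A minimal standalone sketch of the same tagging pattern, with mock types rather than HotSpot code:)

    #include <cassert>

    // Mock of the tagged-constant idea: one jint-sized payload, with the
    // constructor flag deciding which logical type the value carries.
    enum BasicTypeMock { T_INT_M, T_ADDRESS_M };

    struct ConstMock {
      BasicTypeMock type;
      int           value;
      ConstMock(int i, bool is_address = false)
        : type(is_address ? T_ADDRESS_M : T_INT_M), value(i) {}
      int as_int() const {  // accepts both tags, like as_jint() above
        assert(type == T_INT_M || type == T_ADDRESS_M);
        return value;
      }
    };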

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -301,9 +301,9 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
 }

-void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
+void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
   flush_debug_info(pc_offset);
-  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
+  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
   if (cinfo->exception_handlers() != NULL) {
     compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
   }
@@ -413,6 +413,12 @@ void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   verify_oop_map(op->info());

+  // JSR 292
+  // Preserve the SP over MethodHandle call sites.
+  if (op->is_method_handle_invoke()) {
+    preserve_SP(op);
+  }

   if (os::is_MP()) {
     // must align call sites, otherwise they can't be updated atomically on MP hardware
     align_call(op->code());
@@ -423,19 +429,25 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   switch (op->code()) {
   case lir_static_call:
-    call(op->addr(), relocInfo::static_call_type, op->info());
+    call(op, relocInfo::static_call_type);
     break;
   case lir_optvirtual_call:
-    call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
+  case lir_dynamic_call:
+    call(op, relocInfo::opt_virtual_call_type);
     break;
   case lir_icvirtual_call:
-    ic_call(op->addr(), op->info());
+    ic_call(op);
     break;
   case lir_virtual_call:
-    vtable_call(op->vtable_offset(), op->info());
+    vtable_call(op);
     break;
   default: ShouldNotReachHere();
   }
+
+  if (op->is_method_handle_invoke()) {
+    restore_SP(op);
+  }

 #if defined(X86) && defined(TIERED)
   // C2 leaves the FPU stack dirty; clean it.
   if (UseSSE < 2) {

@@ -82,7 +82,7 @@ class LIR_Assembler: public CompilationResourceObj {
   Address as_Address_hi(LIR_Address* addr);

   // debug information
-  void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
+  void add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke = false);
   void add_debug_info_for_branch(CodeEmitInfo* info);
   void add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo);
   void add_debug_info_for_div0_here(CodeEmitInfo* info);
@@ -205,9 +205,13 @@ class LIR_Assembler: public CompilationResourceObj {
   void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
   void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result);

-  void ic_call(address destination, CodeEmitInfo* info);
-  void vtable_call(int vtable_offset, CodeEmitInfo* info);
-  void call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info);
+  void call(        LIR_OpJavaCall* op, relocInfo::relocType rtype);
+  void ic_call(     LIR_OpJavaCall* op);
+  void vtable_call( LIR_OpJavaCall* op);
+
+  // JSR 292
+  void preserve_SP(LIR_OpJavaCall* op);
+  void restore_SP( LIR_OpJavaCall* op);

   void osr_entry();
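(Aside: preserve_SP/restore_SP exist because a MethodHandle adapter may return with a shuffled stack. A hypothetical mock of the bracketing, not real assembler code:)

    // Hypothetical sketch: save the stack pointer before a call whose callee
    // may legitimately move it, and reinstate it afterwards.
    struct FrameMock { void* sp; void* saved_sp; };

    void mh_call(FrameMock* f, void (*target)(FrameMock*)) {
      f->saved_sp = f->sp;  // preserve_SP before the call
      target(f);            // a MethodHandle adapter may change f->sp
      f->sp = f->saved_sp;  // restore_SP after the call
    }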

@@ -2284,7 +2284,7 @@ void LIRGenerator::do_OsrEntry(OsrEntry* x) {
 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
-  int i = x->has_receiver() ? 1 : 0;
+  int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
   for (; i < args->length(); i++) {
     LIRItem* param = args->at(i);
     LIR_Opr loc = arg_list->at(i);
@@ -2322,6 +2322,10 @@ LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
     LIRItem* receiver = new LIRItem(x->receiver(), this);
     argument_items->append(receiver);
   }
+  if (x->is_invokedynamic()) {
+    // Insert a dummy for the synthetic MethodHandle argument.
+    argument_items->append(NULL);
+  }
   int idx = x->has_receiver() ? 1 : 0;
   for (int i = 0; i < x->number_of_arguments(); i++) {
     LIRItem* param = new LIRItem(x->argument_at(i), this);
@@ -2371,6 +2375,9 @@ void LIRGenerator::do_Invoke(Invoke* x) {

   CodeEmitInfo* info = state_for(x, x->state());

+  // invokedynamics can deoptimize.
+  CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;

   invoke_load_arguments(x, args, arg_list);

   if (x->has_receiver()) {
@@ -2407,6 +2414,47 @@ void LIRGenerator::do_Invoke(Invoke* x) {
         __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
       }
       break;
+    case Bytecodes::_invokedynamic: {
+      ciBytecodeStream bcs(x->scope()->method());
+      bcs.force_bci(x->bci());
+      assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
+      ciCPCache* cpcache = bcs.get_cpcache();
+
+      // Get CallSite offset from constant pool cache pointer.
+      int index = bcs.get_method_index();
+      size_t call_site_offset = cpcache->get_f1_offset(index);
+
+      // If this invokedynamic call site hasn't been executed yet in
+      // the interpreter, the CallSite object in the constant pool
+      // cache is still null and we need to deoptimize.
+      if (cpcache->is_f1_null_at(index)) {
+        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+        // clone all handlers.  This is handled transparently in other
+        // places by the CodeEmitInfo cloning logic but is handled
+        // specially here because a stub isn't being used.
+        x->set_exception_handlers(new XHandlers(x->exception_handlers()));
+
+        DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
+        __ jump(deopt_stub);
+      }
+
+      // Use the receiver register for the synthetic MethodHandle
+      // argument.
+      receiver = LIR_Assembler::receiverOpr();
+      LIR_Opr tmp = new_register(objectType);
+
+      // Load CallSite object from constant pool cache.
+      __ oop2reg(cpcache->constant_encoding(), tmp);
+      __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
+
+      // Load target MethodHandle from CallSite object.
+      __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
+
+      __ call_dynamic(x->target(), receiver, result_register,
+                      SharedRuntime::get_resolve_opt_virtual_call_stub(),
+                      arg_list, info);
+      break;
+    }
     default:
       ShouldNotReachHere();
       break;
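(Aside: the invokedynamic case above reduces to two dependent loads before the call: constant-pool-cache entry to CallSite, then CallSite to target MethodHandle. A hedged mock of that chain with hypothetical types; the real offsets come from ciCPCache::get_f1_offset and java_dyn_CallSite::target_offset_in_bytes:)

    // Hypothetical mock, not HotSpot code.
    struct MethodHandleMock { /* the actual call target */ };
    struct CallSiteMock     { MethodHandleMock* target; };
    struct CPCacheEntryMock { CallSiteMock* f1; };  // NULL until first execution

    MethodHandleMock* resolve_invokedynamic(CPCacheEntryMock* entry) {
      CallSiteMock* cs = entry->f1;  // load the CallSite from the constant pool cache
      if (cs == NULL) {
        return NULL;                 // unresolved: the compiled code deoptimizes here
      }
      return cs->target;             // load the target MethodHandle from the CallSite
    }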

@@ -2479,6 +2479,15 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
       return 2;
     }

+    case T_ADDRESS: {
+#ifdef _LP64
+      scope_values->append(new ConstantLongValue(c->as_jint()));
+#else
+      scope_values->append(new ConstantIntValue(c->as_jint()));
+#endif
+      return 1;
+    }

     default:
       ShouldNotReachHere();
       return -1;

@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@ class C1_MacroAssembler: public MacroAssembler {
   void inline_cache_check(Register receiver, Register iCache);
   void build_frame(int frame_size_in_bytes);
-  void method_exit(bool restore_frame);
+  void remove_frame(int frame_size_in_bytes);
   void unverified_entry(Register receiver, Register ic_klass);
   void verified_entry();

@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,16 @@ size_t ciCPCache::get_f1_offset(int index) {
 }

+// ------------------------------------------------------------------
+// ciCPCache::is_f1_null_at
+bool ciCPCache::is_f1_null_at(int index) {
+  VM_ENTRY_MARK;
+  constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop();
+  oop f1 = cpcache->secondary_entry_at(index)->f1();
+  return (f1 == NULL);
+}

 // ------------------------------------------------------------------
 // ciCPCache::print
 //

@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,5 +39,7 @@ public:
   // requested entry.
   size_t get_f1_offset(int index);

+  bool is_f1_null_at(int index);

   void print();
 };

@@ -385,11 +385,6 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
                              KILL_COMPILE_ON_FATAL_(fail_type));
   }

-  if (found_klass != NULL) {
-    // Found it.  Build a CI handle.
-    return get_object(found_klass)->as_klass();
-  }

   // If we fail to find an array klass, look again for its element type.
   // The element type may be available either locally or via constraints.
   // In either case, if we can find the element type in the system dictionary,
@@ -414,6 +409,11 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
     }
   }

+  if (found_klass != NULL) {
+    // Found it.  Build a CI handle.
+    return get_object(found_klass)->as_klass();
+  }

   if (require_local) return NULL;
   // Not yet loaded into the VM, or not governed by loader constraints.
   // Make a CI representative for it.

@@ -334,33 +334,6 @@ klassOop LoaderConstraintTable::find_constrained_klass(symbolHandle name,
   return NULL;
 }

-klassOop LoaderConstraintTable::find_constrained_elem_klass(symbolHandle name,
-                                                            symbolHandle elem_name,
-                                                            Handle loader,
-                                                            TRAPS) {
-  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL) {
-    assert(p->klass() == NULL, "Expecting null array klass");
-
-    // The array name has a constraint, but it will not have a class. Check
-    // each loader for an associated elem
-    for (int i = 0; i < p->num_loaders(); i++) {
-      Handle no_protection_domain;
-
-      klassOop k = SystemDictionary::find(elem_name, p->loader(i), no_protection_domain, THREAD);
-      if (k != NULL) {
-        // Return the first elem klass found.
-        return k;
-      }
-    }
-  }
-
-  // No constraints, or else no klass loaded yet.
-  return NULL;
-}

 void LoaderConstraintTable::ensure_loader_constraint_capacity(
                                                      LoaderConstraintEntry *p,
                                                      int nfree) {

@@ -66,9 +66,6 @@ public:
   //                              bool is_method, TRAPS)

   klassOop find_constrained_klass(symbolHandle name, Handle loader);
-  klassOop find_constrained_elem_klass(symbolHandle name, symbolHandle elem_name,
-                                       Handle loader, TRAPS);

   // Class loader constraints

@@ -2178,9 +2178,8 @@ klassOop SystemDictionary::find_constrained_instance_or_array_klass(
   // a loader constraint that would require this loader to return the
   // klass that is already loaded.
   if (FieldType::is_array(class_name())) {
-    // Array classes are hard because their klassOops are not kept in the
-    // constraint table. The array klass may be constrained, but the elem class
-    // may not be.
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element klassOops are.
     jint dimension;
     symbolOop object_key;
     BasicType t = FieldType::get_array_info(class_name(), &dimension,
@@ -2190,8 +2189,9 @@ klassOop SystemDictionary::find_constrained_instance_or_array_klass(
     } else {
       symbolHandle elem_name(THREAD, object_key);
       MutexLocker mu(SystemDictionary_lock, THREAD);
-      klass = constraints()->find_constrained_elem_klass(class_name, elem_name, class_loader, THREAD);
+      klass = constraints()->find_constrained_klass(elem_name, class_loader);
     }
+    // If element class already loaded, allocate array klass
     if (klass != NULL) {
       klass = Klass::cast(klass)->array_klass_or_null(dimension);
     }
@@ -2209,22 +2209,38 @@ bool SystemDictionary::add_loader_constraint(symbolHandle class_name,
                                              Handle class_loader1,
                                              Handle class_loader2,
                                              Thread* THREAD) {
-  unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
+  symbolHandle constraint_name;
+  if (!FieldType::is_array(class_name())) {
+    constraint_name = class_name;
+  } else {
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element classes are.
+    jint dimension;
+    symbolOop object_key;
+    BasicType t = FieldType::get_array_info(class_name(), &dimension,
+                                            &object_key, CHECK_(false));
+    // primitive types always pass
+    if (t != T_OBJECT) {
+      return true;
+    } else {
+      constraint_name = symbolHandle(THREAD, object_key);
+    }
+  }
+  unsigned int d_hash1 = dictionary()->compute_hash(constraint_name, class_loader1);
   int d_index1 = dictionary()->hash_to_index(d_hash1);
-  unsigned int d_hash2 = dictionary()->compute_hash(class_name, class_loader2);
+  unsigned int d_hash2 = dictionary()->compute_hash(constraint_name, class_loader2);
   int d_index2 = dictionary()->hash_to_index(d_hash2);
   {
     MutexLocker mu_s(SystemDictionary_lock, THREAD);

     // Better never do a GC while we're holding these oops
     No_Safepoint_Verifier nosafepoint;

-    klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
-    klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
-    return constraints()->add_entry(class_name, klass1, class_loader1,
+    klassOop klass1 = find_class(d_index1, d_hash1, constraint_name, class_loader1);
+    klassOop klass2 = find_class(d_index2, d_hash2, constraint_name, class_loader2);
+    return constraints()->add_entry(constraint_name, klass1, class_loader1,
                                     klass2, class_loader2);
   }
 }
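(Aside: the rewritten add_loader_constraint keys array constraints on the element class; primitive arrays pass unconditionally. An illustrative helper for that signature-to-element mapping, assumed behavior rather than VM code:)

    #include <string>

    // Maps an array signature to the class name a constraint would be keyed
    // on; returns "" for primitive arrays, which always pass (illustrative only).
    std::string constraint_element(const std::string& sig) {
      std::string::size_type i = sig.find_first_not_of('[');
      if (i == std::string::npos || sig[i] != 'L') {
        return "";                                   // e.g. "[[I": primitive, no constraint
      }
      return sig.substr(i + 1, sig.size() - i - 2);  // "[[Ljava/lang/String;" -> "java/lang/String"
    }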
@@ -2301,6 +2317,7 @@ symbolOop SystemDictionary::find_resolution_error(constantPoolHandle pool, int w
 // Returns the name of the type that failed a loader constraint check, or
 // NULL if no constraint failed. The returned C string needs cleaning up
 // with a ResourceMark in the caller. No exception except OOME is thrown.
+// Arrays are not added to the loader constraint table, their elements are.
 char* SystemDictionary::check_signature_loaders(symbolHandle signature,
                                                 Handle loader1, Handle loader2,
                                                 bool is_method, TRAPS) {

@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -249,7 +249,6 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
   size += round_to(buffer_size, oopSize);
   assert(name != NULL, "must provide a name");
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) BufferBlob(name, size);
   }
@@ -271,7 +270,6 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
   unsigned int size = allocation_size(cb, sizeof(BufferBlob));
   assert(name != NULL, "must provide a name");
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) BufferBlob(name, size, cb);
   }
@@ -298,10 +296,48 @@ void BufferBlob::free( BufferBlob *blob ) {
   MemoryService::track_code_cache_memory_usage();
 }

-bool BufferBlob::is_adapter_blob() const {
-  return (strcmp(AdapterHandlerEntry::name, name()) == 0);
-}

+//----------------------------------------------------------------------------------------------------
+// Implementation of AdapterBlob
+
+AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  AdapterBlob* blob = NULL;
+  unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) AdapterBlob(size, cb);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
+
+//----------------------------------------------------------------------------------------------------
+// Implementation of MethodHandlesAdapterBlob
+
+MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  MethodHandlesAdapterBlob* blob = NULL;
+  unsigned int size = sizeof(MethodHandlesAdapterBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += round_to(buffer_size, oopSize);
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) MethodHandlesAdapterBlob(size);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}

 //----------------------------------------------------------------------------------------------------
 // Implementation of RuntimeStub

@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,14 +90,15 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
   void flush();

   // Typing
   virtual bool is_buffer_blob() const            { return false; }
   virtual bool is_nmethod() const                { return false; }
   virtual bool is_runtime_stub() const           { return false; }
   virtual bool is_deoptimization_stub() const    { return false; }
   virtual bool is_uncommon_trap_stub() const     { return false; }
   virtual bool is_exception_stub() const         { return false; }
   virtual bool is_safepoint_stub() const         { return false; }
   virtual bool is_adapter_blob() const           { return false; }
+  virtual bool is_method_handles_adapter_blob() const { return false; }

   virtual bool is_compiled_by_c2() const         { return false; }
   virtual bool is_compiled_by_c1() const         { return false; }
@@ -221,6 +222,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {

 class BufferBlob: public CodeBlob {
   friend class VMStructs;
+  friend class AdapterBlob;
+  friend class MethodHandlesAdapterBlob;

  private:
   // Creation support
   BufferBlob(const char* name, int size);
@@ -236,8 +240,7 @@ class BufferBlob: public CodeBlob {
   static void free(BufferBlob* buf);

   // Typing
-  bool is_buffer_blob() const                    { return true; }
-  bool is_adapter_blob() const;
+  virtual bool is_buffer_blob() const            { return true; }

   // GC/Verification support
   void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f)  { /* nothing to do */ }
@@ -254,6 +257,40 @@ class BufferBlob: public CodeBlob {
 };

+//----------------------------------------------------------------------------------------------------
+// AdapterBlob: used to hold C2I/I2C adapters
+
+class AdapterBlob: public BufferBlob {
+ private:
+  AdapterBlob(int size)                 : BufferBlob("I2C/C2I adapters", size) {}
+  AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
+
+ public:
+  // Creation
+  static AdapterBlob* create(CodeBuffer* cb);
+
+  // Typing
+  virtual bool is_adapter_blob() const { return true; }
+};
+
+//----------------------------------------------------------------------------------------------------
+// MethodHandlesAdapterBlob: used to hold MethodHandles adapters
+
+class MethodHandlesAdapterBlob: public BufferBlob {
+ private:
+  MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
+  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
+
+ public:
+  // Creation
+  static MethodHandlesAdapterBlob* create(int buffer_size);
+
+  // Typing
+  virtual bool is_method_handles_adapter_blob() const { return true; }
+};

 //----------------------------------------------------------------------------------------------------
 // RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine
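(Aside: the new blob classes replace the old strcmp-on-name check with a virtual predicate per subclass. A minimal sketch of the pattern, using simplified mocks rather than the real hierarchy:)

    #include <cstdio>

    // Virtual type predicates: each subclass answers its own "is_*" query,
    // so callers never have to compare blob names with strcmp.
    struct BlobMock {
      virtual bool is_adapter_blob() const { return false; }
      virtual ~BlobMock() {}
    };
    struct AdapterBlobMock : BlobMock {
      virtual bool is_adapter_blob() const { return true; }
    };

    int main() {
      AdapterBlobMock a;
      BlobMock* b = &a;
      std::printf("%d\n", b->is_adapter_blob() ? 1 : 0);  // prints 1 via dynamic dispatch
      return 0;
    }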

@@ -988,10 +988,12 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
     }
     if (method->is_not_compilable(comp_level)) return NULL;

-    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
-    if (saved != NULL) {
-      method->set_code(method, saved);
-      return saved;
+    if (UseCodeCacheFlushing) {
+      nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+      if (saved != NULL) {
+        method->set_code(method, saved);
+        return saved;
+      }
     }

   } else {
View file

@ -46,9 +46,9 @@ CMSAdaptiveSizePolicy::CMSAdaptiveSizePolicy(size_t init_eden_size,
_processor_count = os::active_processor_count(); _processor_count = os::active_processor_count();
if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) { if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
assert(_processor_count > 0, "Processor count is suspect"); assert(_processor_count > 0, "Processor count is suspect");
_concurrent_processor_count = MIN2((uint) ParallelCMSThreads, _concurrent_processor_count = MIN2((uint) ConcGCThreads,
(uint) _processor_count); (uint) _processor_count);
} else { } else {
_concurrent_processor_count = 1; _concurrent_processor_count = 1;

View file

@ -606,7 +606,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?"); assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
} }
if (!_markStack.allocate(CMSMarkStackSize)) { if (!_markStack.allocate(MarkStackSize)) {
warning("Failed to allocate CMS Marking Stack"); warning("Failed to allocate CMS Marking Stack");
return; return;
} }
@ -617,13 +617,13 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for multi-threaded concurrent phases // Support for multi-threaded concurrent phases
if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) { if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
if (FLAG_IS_DEFAULT(ParallelCMSThreads)) { if (FLAG_IS_DEFAULT(ConcGCThreads)) {
// just for now // just for now
FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4); FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
} }
if (ParallelCMSThreads > 1) { if (ConcGCThreads > 1) {
_conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads", _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
ParallelCMSThreads, true); ConcGCThreads, true);
if (_conc_workers == NULL) { if (_conc_workers == NULL) {
warning("GC/CMS: _conc_workers allocation failure: " warning("GC/CMS: _conc_workers allocation failure: "
"forcing -CMSConcurrentMTEnabled"); "forcing -CMSConcurrentMTEnabled");
@ -634,13 +634,13 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
} }
} }
if (!CMSConcurrentMTEnabled) { if (!CMSConcurrentMTEnabled) {
ParallelCMSThreads = 0; ConcGCThreads = 0;
} else { } else {
// Turn off CMSCleanOnEnter optimization temporarily for // Turn off CMSCleanOnEnter optimization temporarily for
// the MT case where it's not fixed yet; see 6178663. // the MT case where it's not fixed yet; see 6178663.
CMSCleanOnEnter = false; CMSCleanOnEnter = false;
} }
assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), assert((_conc_workers != NULL) == (ConcGCThreads > 1),
"Inconsistency"); "Inconsistency");
// Parallel task queues; these are shared for the // Parallel task queues; these are shared for the
@ -648,7 +648,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// are not shared with parallel scavenge (ParNew). // are not shared with parallel scavenge (ParNew).
{ {
uint i; uint i;
uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads); uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
|| ParallelRefProcEnabled) || ParallelRefProcEnabled)
@ -723,8 +723,9 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for parallelizing survivor space rescan // Support for parallelizing survivor space rescan
if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) { if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
size_t max_plab_samples = cp->max_gen0_size()/ const size_t max_plab_samples =
((SurvivorRatio+2)*MinTLABSize); ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads); _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples); _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads); _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
@ -3657,7 +3658,7 @@ bool CMSCollector::markFromRootsWork(bool asynch) {
assert(_revisitStack.isEmpty(), "tabula rasa"); assert(_revisitStack.isEmpty(), "tabula rasa");
DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
bool result = false; bool result = false;
if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
result = do_marking_mt(asynch); result = do_marking_mt(asynch);
} else { } else {
result = do_marking_st(asynch); result = do_marking_st(asynch);
@ -4174,10 +4175,10 @@ void CMSConcMarkingTask::coordinator_yield() {
} }
bool CMSCollector::do_marking_mt(bool asynch) { bool CMSCollector::do_marking_mt(bool asynch) {
assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition"); assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
// In the future this would be determined ergonomically, based // In the future this would be determined ergonomically, based
// on #cpu's, # active mutator threads (and load), and mutation rate. // on #cpu's, # active mutator threads (and load), and mutation rate.
int num_workers = ParallelCMSThreads; int num_workers = ConcGCThreads;
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@ -6429,8 +6430,8 @@ bool CMSMarkStack::allocate(size_t size) {
// For now we take the expedient path of just disabling the // For now we take the expedient path of just disabling the
// messages for the problematic case.) // messages for the problematic case.)
void CMSMarkStack::expand() { void CMSMarkStack::expand() {
assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted"); assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
if (_capacity == CMSMarkStackSizeMax) { if (_capacity == MarkStackSizeMax) {
if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
// We print a warning message only once per CMS cycle. // We print a warning message only once per CMS cycle.
gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
@ -6438,7 +6439,7 @@ void CMSMarkStack::expand() {
return; return;
} }
// Double capacity if possible // Double capacity if possible
size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax); size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
// Do not give up existing stack until we have managed to // Do not give up existing stack until we have managed to
// get the double capacity that we desired. // get the double capacity that we desired.
ReservedSpace rs(ReservedSpace::allocation_align_size_up( ReservedSpace rs(ReservedSpace::allocation_align_size_up(

@@ -44,20 +44,20 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
 {
   // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
-  set_green_zone(G1ConcRefineGreenZone);
+  set_green_zone(G1ConcRefinementGreenZone);

-  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
   }
-  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));

-  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
-  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
@@ -76,15 +76,15 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
 }

 void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
   } else {
-    _thread_threshold_step = G1ConcRefineThresholdStep;
+    _thread_threshold_step = G1ConcRefinementThresholdStep;
   }
 }

 int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
+  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
 }

 void ConcurrentG1Refine::init() {
View file

@ -39,7 +39,8 @@ class ConcurrentG1Refine: public CHeapObj {
* running. If the length becomes red (max queue length) the mutators start * running. If the length becomes red (max queue length) the mutators start
* processing the buffers. * processing the buffers.
* *
* There are some interesting cases (with G1AdaptiveConcRefine turned off): * There are some interesting cases (when G1UseAdaptiveConcRefinement
* is turned off):
* 1) green = yellow = red = 0. In this case the mutator will process all * 1) green = yellow = red = 0. In this case the mutator will process all
* buffers. Except for those that are created by the deferred updates * buffers. Except for those that are created by the deferred updates
* machinery during a collection. * machinery during a collection.
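(Aside: with the renamed flags left at their defaults, the three zones are derived from one another, as the FLAG_SET_DEFAULT lines in the .cpp hunk above show. A worked example of that default arithmetic, with assumptions mirroring those lines:)

    #include <algorithm>
    #include <cstdio>

    int main() {
      int parallel_gc_threads = 4;                    // example input
      int green  = std::max(parallel_gc_threads, 1);  // G1ConcRefinementGreenZone default
      int yellow = green * 3;                         // G1ConcRefinementYellowZone default
      int red    = yellow * 2;                        // G1ConcRefinementRedZone default
      std::printf("green=%d yellow=%d red=%d\n", green, yellow, red);  // 4 12 24
      return 0;
    }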

@@ -107,7 +107,7 @@ void ConcurrentG1RefineThread::run_young_rs_sampling() {
     if (_should_terminate) {
       break;
     }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
   }
 }
@@ -127,7 +127,7 @@ bool ConcurrentG1RefineThread::is_active() {
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
                              _worker_id, _threshold, (int)dcqs.completed_buffers_num());
@@ -143,7 +143,7 @@ void ConcurrentG1RefineThread::activate() {
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
                              _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
@@ -218,9 +218,13 @@ void ConcurrentG1RefineThread::run() {
 void ConcurrentG1RefineThread::yield() {
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield");
+  }
   _sts.yield("G1 refine");
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield-end");
+  }
 }

 void ConcurrentG1RefineThread::stop() {
@@ -241,7 +245,9 @@ void ConcurrentG1RefineThread::stop() {
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }

 void ConcurrentG1RefineThread::print() const {

@@ -447,7 +447,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                          "heap end = "PTR_FORMAT, _heap_start, _heap_end);

-  _markStack.allocate(G1MarkStackSize);
+  _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);

   // Create & start a ConcurrentMark thread.
@@ -461,7 +461,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");

   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-  satb_qs.set_buffer_size(G1SATBLogBufferSize);
+  satb_qs.set_buffer_size(G1SATBBufferSize);

   int size = (int) MAX2(ParallelGCThreads, (size_t)1);
   _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
@@ -483,8 +483,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     _accum_task_vtime[i] = 0.0;
   }

-  if (ParallelMarkingThreads > ParallelGCThreads) {
-    vm_exit_during_initialization("Can't have more ParallelMarkingThreads "
+  if (ConcGCThreads > ParallelGCThreads) {
+    vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
   }
   if (ParallelGCThreads == 0) {
@@ -494,11 +494,11 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     _sleep_factor             = 0.0;
     _marking_task_overhead    = 1.0;
   } else {
-    if (ParallelMarkingThreads > 0) {
-      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
+    if (ConcGCThreads > 0) {
+      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
-      _parallel_marking_threads = ParallelMarkingThreads;
+      _parallel_marking_threads = ConcGCThreads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
@ -760,7 +760,10 @@ void ConcurrentMark::checkpointRootsInitialPost() {
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
satb_mq_set.set_active_all_threads(true); // This is the start of the marking cycle, we're expected all
// threads to have SATB queues with active set to false.
satb_mq_set.set_active_all_threads(true, /* new active value */
false /* expected_active */);
// update_g1_committed() will be called at the end of an evac pause // update_g1_committed() will be called at the end of an evac pause
// when marking is on. So, it's also called at the end of the // when marking is on. So, it's also called at the end of the
@ -1079,7 +1082,11 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
gclog_or_tty->print_cr("\nRemark led to restart for overflow."); gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
} else { } else {
// We're done with marking. // We're done with marking.
JavaThread::satb_mark_queue_set().set_active_all_threads(false); // This is the end of the marking cycle, we're expected all
// threads to have SATB queues with active set to true.
JavaThread::satb_mark_queue_set().set_active_all_threads(
false, /* new active value */
true /* expected_active */);
if (VerifyDuringGC) { if (VerifyDuringGC) {
HandleMark hm; // handle scope HandleMark hm; // handle scope
@ -2586,7 +2593,11 @@ void ConcurrentMark::abort() {
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
satb_mq_set.abandon_partial_marking(); satb_mq_set.abandon_partial_marking();
satb_mq_set.set_active_all_threads(false); // This can be called either during or outside marking, we'll read
// the expected_active value from the SATB queue set.
satb_mq_set.set_active_all_threads(
false, /* new active value */
satb_mq_set.is_active() /* expected_active */);
} }
static void print_ms_time_info(const char* prefix, const char* name, static void print_ms_time_info(const char* prefix, const char* name,
@ -3704,7 +3715,14 @@ void CMTask::do_marking_step(double time_target_ms) {
// enough to point to the next possible object header (the // enough to point to the next possible object header (the
// bitmap knows by how much we need to move it as it knows its // bitmap knows by how much we need to move it as it knows its
// granularity). // granularity).
move_finger_to(_nextMarkBitMap->nextWord(_finger)); assert(_finger < _region_limit, "invariant");
HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
// Check if bitmap iteration was aborted while scanning the last object
if (new_finger >= _region_limit) {
giveup_current_region();
} else {
move_finger_to(new_finger);
}
} }
} }
// At this point we have either completed iterating over the // At this point we have either completed iterating over the
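
The two-argument set_active_all_threads() contract used in the hunks above is easiest to see in isolation: the queue set and every per-thread queue must already hold the expected value before the flag is flipped. A minimal, self-contained C++ sketch of that handshake (toy names, not HotSpot code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Toy model of the SATB activation handshake: the set-wide flag and each
    // per-thread flag are checked against expected_active before being set.
    struct QueueSetModel {
      bool all_active = false;
      std::vector<bool> per_thread;

      void set_active_all_threads(bool b, bool expected_active) {
        assert(all_active == expected_active);      // set-level check
        all_active = b;
        for (std::size_t i = 0; i < per_thread.size(); ++i) {
          assert(per_thread[i] == expected_active); // per-thread check
          per_thread[i] = b;
        }
      }
    };

    int main() {
      QueueSetModel s;
      s.per_thread.assign(4, false);
      s.set_active_all_threads(true,  false); // start of marking cycle
      s.set_active_all_threads(false, true);  // end of marking cycle
      return 0;
    }

The abort() case passes the set's own current value as expected_active, which deliberately makes the check vacuous there, since abort can happen either during or outside marking.
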
@@ -24,8 +24,8 @@
 class G1CollectedHeap;
 class CMTask;
 typedef GenericTaskQueue<oop> CMTaskQueue;
-typedef GenericTaskQueueSet<oop> CMTaskQueueSet;
+typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;

 // A generic CM bit map. This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
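
The typedef change above reflects a wider refactoring in this changeset: GenericTaskQueueSet is now parameterized by the queue type rather than the element type, so a queue's own template parameters (such as its capacity) travel with it into the set. A hypothetical stand-alone illustration; the template names here are stand-ins, not the HotSpot ones:

    #include <cstddef>

    template <typename E, std::size_t N = 1024>
    struct Queue {
      E elems[N];          // capacity is part of the queue's type
    };

    template <typename Q>
    struct QueueSet {      // parameterized by the queue type...
      Q* queues;           // ...so it works for any element type *and* size
    };

    typedef Queue<long, 8192>  BigQueue;     // custom capacity
    typedef QueueSet<BigQueue> BigQueueSet;  // the set inherits that capacity

    int main() {
      BigQueueSet s = { nullptr };
      (void)s;
      return 0;
    }
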
@@ -583,7 +583,7 @@ HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
           res->zero_fill_state() == HeapRegion::Allocated)),
          "Non-young alloc Regions must be zero filled (and non-H)");

-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     if (res != NULL) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
@@ -2102,18 +2102,21 @@ size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   // Return the remaining space in the cur alloc region, but not less than
   // the min TLAB size.
-  // Also, no more than half the region size, since we can't allow tlabs to
-  // grow big enough to accomodate humongous objects.
-  // We need to story it locally, since it might change between when we
-  // test for NULL and when we use it later.
+  // Also, this value can be at most the humongous object threshold,
+  // since we can't allow tlabs to grow big enough to accommodate
+  // humongous objects.
+  // We need to store the cur alloc region locally, since it might change
+  // between when we test for NULL and when we use it later.
   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
+  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
   if (cur_alloc_space == NULL) {
-    return HeapRegion::GrainBytes/2;
+    return max_tlab_size;
   } else {
-    return MAX2(MIN2(cur_alloc_space->free(),
-                     (size_t)(HeapRegion::GrainBytes/2)),
-                (size_t)MinTLABSize);
+    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
+                max_tlab_size);
   }
 }
@@ -2477,7 +2480,7 @@ void G1CollectedHeap::print_tracing_info() const {
   if (G1SummarizeRSetStats) {
     g1_rem_set()->print_summary_info();
   }
-  if (G1SummarizeConcurrentMark) {
+  if (G1SummarizeConcMark) {
     concurrent_mark()->print_summary_info();
   }
   if (G1SummarizeZFStats) {
@@ -3480,7 +3483,7 @@ void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintRegions) {
+    if (G1PrintHeapRegions) {
       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
@@ -4002,9 +4005,7 @@ public:
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
     }
-    if (G1UseSurvivorSpaces) {
-      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
-    }
+    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

     // Clean up any par-expanded rem sets.
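
The new bound in unsafe_max_tlab_alloc() is an ordinary clamp: the free space of the current alloc region, forced into the range [MinTLABSize, humongous threshold]. A small worked example with invented sizes:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      const std::size_t min_tlab   = 2 * 1024;        // stand-in for MinTLABSize
      const std::size_t max_tlab   = 512 * 1024;      // humongous threshold, bytes
      const std::size_t free_bytes = 3 * 1024 * 1024; // lots of room

      // MIN2(MAX2(free, min), max) from the hunk above:
      std::size_t r1 = std::min(std::max(free_bytes, min_tlab), max_tlab);
      assert(r1 == max_tlab);   // large free space is capped at the threshold

      std::size_t r2 = std::min(std::max((std::size_t)512, min_tlab), max_tlab);
      assert(r2 == min_tlab);   // small free space is rounded up to the minimum
      return 0;
    }
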
@@ -56,8 +56,8 @@ class ConcurrentZFThread;
 # define IF_G1_DETAILED_STATS(code)
 #endif

 typedef GenericTaskQueue<StarTask>    RefToScanQueue;
-typedef GenericTaskQueueSet<StarTask> RefToScanQueueSet;
+typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
@@ -1055,7 +1055,12 @@ public:
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
-    return word_size >= _humongous_object_threshold_in_words;
+    // Note this has to be strictly greater-than as the TLABs
+    // are capped at the humongous threshold and we want to
+    // ensure that we don't try to allocate a TLAB as
+    // humongous and that we don't allocate a humongous
+    // object in a TLAB.
+    return word_size > _humongous_object_threshold_in_words;
   }

   // Update mod union table with the set of dirty cards.
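
The switch from >= to > pairs with the TLAB cap above: a request of exactly the threshold may now be satisfied as a TLAB without ever being classified as humongous. A tiny check of the boundary case (the threshold value is illustrative):

    #include <cassert>
    #include <cstddef>

    int main() {
      const std::size_t threshold_words = 64 * 1024;  // invented value
      auto is_humongous = [&](std::size_t words) {
        return words > threshold_words;               // strictly greater-than
      };
      assert(!is_humongous(threshold_words));         // exactly at the cap: ok
      assert(is_humongous(threshold_words + 1));
      return 0;
    }
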
@@ -270,14 +270,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;

-  if (G1UseSurvivorSpaces) {
-    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
-    // fixed, then _max_survivor_regions will be calculated at
-    // calculate_young_list_target_config during initialization
-    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
-  } else {
-    _max_survivor_regions = 0;
-  }
+  // if G1FixedSurvivorSpaceSize is 0, which means the size is not
+  // fixed, then _max_survivor_regions will be calculated at
+  // calculate_young_list_target_config during initialization
+  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

   initialize_all();
 }
@@ -296,28 +292,54 @@ void G1CollectorPolicy::initialize_flags() {
   CollectorPolicy::initialize_flags();
 }

+// The easiest way to deal with the parsing of the NewSize /
+// MaxNewSize / etc. parameters is to re-use the code in the
+// TwoGenerationCollectorPolicy class. This is similar to what
+// ParallelScavenge does with its GenerationSizer class (see
+// ParallelScavengeHeap::initialize()). We might change this in the
+// future, but it's a good start.
+class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
+
+public:
+  G1YoungGenSizer() {
+    initialize_flags();
+    initialize_size_info();
+  }
+
+  size_t min_young_region_num() {
+    return size_to_region_num(_min_gen0_size);
+  }
+  size_t initial_young_region_num() {
+    return size_to_region_num(_initial_gen0_size);
+  }
+  size_t max_young_region_num() {
+    return size_to_region_num(_max_gen0_size);
+  }
+};
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
-  size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;

   assert(Heap_lock->owned_by_self(), "Locking discipline.");

-  if (G1SteadyStateUsed < 50) {
-    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
-  }
-
   initialize_gc_policy_counters();

   if (G1Gen) {
     _in_young_gc_mode = true;

-    if (G1YoungGenSize == 0) {
+    G1YoungGenSizer sizer;
+    size_t initial_region_num = sizer.initial_young_region_num();
+
+    if (UseAdaptiveSizePolicy) {
       set_adaptive_young_list_length(true);
       _young_list_fixed_length = 0;
     } else {
       set_adaptive_young_list_length(false);
-      _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
+      _young_list_fixed_length = initial_region_num;
     }
     _free_regions_at_end_of_collection = _g1->free_regions();
     _scan_only_regions_at_end_of_collection = 0;
@@ -455,7 +477,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "pre-condition" );

   double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent);
+  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   size_t reserve_regions =
     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
@@ -1110,10 +1132,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   size_t short_lived_so_length = _young_list_so_prefix_length;
   _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   tag_scan_only(short_lived_so_length);
-
-  if (G1UseSurvivorSpaces) {
-    _survivors_age_table.clear();
-  }
+  _survivors_age_table.clear();

   assert( verify_young_ages(), "region age verification" );
 }
@@ -1432,7 +1451,7 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
     record_concurrent_mark_init_end_pre(0.0);

     size_t min_used_targ =
-      (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
+      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

     if (cur_used_bytes > min_used_targ) {
       if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
@@ -1916,7 +1935,7 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
   calculate_young_list_target_config();

   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
-  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>

@@ -1932,7 +1951,7 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

-  if (G1AdaptiveConcRefine) {
+  if (G1UseAdaptiveConcRefinement) {
     const int k_gy = 3, k_gr = 6;
     const double inc_k = 1.1, dec_k = 0.9;

@@ -2607,9 +2626,6 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
-  if (!G1UseSurvivorSpaces) {
-    return;
-  }
   if (G1FixedSurvivorSpaceSize == 0) {
     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
   } else {
@@ -2628,13 +2644,6 @@ bool
 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
                                                                word_size) {
   assert(_g1->regions_accounted_for(), "Region leakage!");
-
-  // Initiate a pause when we reach the steady-state "used" target.
-  size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
-  size_t used_soft =
-   MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
-        used_hard/2);
-  size_t used = _g1->used();
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   size_t young_list_length = _g1->young_list_length();
@@ -2867,7 +2876,7 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
 // estimate of the number of live bytes.
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
                            "top "PTR_FORMAT", young %s",
                            hr->hrs_index(), hr->bottom(), hr->end(),
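
The replacement marking trigger is plain percentage arithmetic: a concurrent cycle is considered once used bytes exceed (capacity / 100) * InitiatingHeapOccupancyPercent. A short sketch with invented numbers (45 is shown only as an example percentage):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t capacity = 1024u * 1024u * 1024u; // 1 GiB heap
      const std::size_t percent  = 45;                    // example IHOP value
      const std::size_t min_used_targ = (capacity / 100) * percent;
      std::printf("marking considered above %zu bytes used\n", min_used_targ);
      return 0;
    }
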
@@ -88,13 +88,13 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
   //   the time slice than what's allowed)
   // consolidate the two entries with the minimum gap between them
   // (this might allow less GC time than what's allowed)
-  guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
-            "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+  guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
+            "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
   // In the case where ScavengeALot is true, such overflow is not
   // uncommon; in such cases, we can, without much loss of precision
   // or performance (we are GC'ing most of the time anyway!),
   // simply overwrite the oldest entry in the tracker: this
-  // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+  // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
   _head_index = trim_index(_head_index + 1);
   assert(_head_index == _tail_index, "Because we have a full circular buffer");
   _tail_index = trim_index(_tail_index + 1);
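
When the pause window is full, the code above falls back to overwriting the oldest entry, which is the behaviour the renamed G1UseFixedWindowMMUTracker flag opts into. A toy drop-oldest ring buffer (not the HotSpot tracker) showing the head/tail movement:

    #include <cstddef>

    template <std::size_t N>
    struct PauseRing {
      double start[N], end[N];
      std::size_t head = 0, tail = 0, count = 0;

      void add_pause(double s, double e) {
        if (count == N) {          // full: forget the oldest entry
          head = (head + 1) % N;
          --count;
        }
        start[tail] = s;
        end[tail]   = e;
        tail = (tail + 1) % N;     // append at the tail
        ++count;
      }
    };

    int main() {
      PauseRing<4> ring;
      for (int i = 0; i < 10; ++i) {
        ring.add_pause(i, i + 0.01); // the six oldest pauses are dropped
      }
      return 0;
    }
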
@@ -101,7 +101,7 @@ private:
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
-  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // the oldest entry in the event that +G1UseFixedWindowMMUTracker, thus
   // potentially violating MMU specs for some time thereafter.

   G1MMUTrackerQueueElem _array[QueueLength];
@@ -101,6 +101,8 @@ void G1MarkSweep::allocate_stacks() {
   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  GenMarkSweep::_objarray_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

   int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
@@ -467,7 +467,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   // and they are causing failures. When we resolve said race
   // conditions, we'll revert back to parallel remembered set
   // updating and scanning. See CRs 6677707 and 6677708.
-  if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
+  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
     updateRS(worker_i);
     scanNewRefsRS(oc, worker_i);
   } else {
@@ -476,7 +476,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
     _g1p->record_update_rs_time(worker_i, 0.0);
     _g1p->record_scan_new_refs_time(worker_i, 0.0);
   }
-  if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
+  if (G1UseParallelRSetScanning || (worker_i == 0)) {
     scanRS(oc, worker_i);
   } else {
     _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
@@ -35,7 +35,7 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,

 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   assert(pre_val->is_oop_or_null(true), "Error");
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
     JavaThread* jt = (JavaThread*)thr;
@@ -51,7 +51,7 @@ template <class T> void
 G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,
                                                     oop new_val,
                                                     JavaThread* jt) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T heap_oop = oopDesc::load_heap_oop(field);
   if (!oopDesc::is_null(heap_oop)) {
     oop pre_val = oopDesc::decode_heap_oop_not_null(heap_oop);
@@ -62,7 +62,7 @@ G1SATBCardTableModRefBS::write_ref_field_pre_static(T* field,

 template <class T> void
 G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
-  if (!JavaThread::satb_mark_queue_set().active()) return;
+  if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (int i = 0; i < count; i++, elem_ptr++) {
     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
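
All three hunks above guard the same pattern: the SATB pre-write barrier does nothing unless marking is active, and otherwise enqueues the value about to be overwritten. Reduced to plain C++ with stand-in types and a stand-in enqueue sink:

    #include <cstdio>

    struct Obj;                           // opaque heap object

    static bool marking_active = true;    // models satb_mark_queue_set().is_active()

    static void enqueue(Obj* pre_val) {   // models the per-thread SATB queue
      std::printf("enqueued %p\n", static_cast<void*>(pre_val));
    }

    static void write_ref_field_pre(Obj** field) {
      if (!marking_active) return;        // barrier is a no-op outside marking
      Obj* pre_val = *field;
      if (pre_val != nullptr) {
        enqueue(pre_val);                 // record the old value before the store
      }
    }

    int main() {
      Obj* slot = nullptr;
      write_ref_field_pre(&slot);         // null pre-value: nothing enqueued
      return 0;
    }
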
@@ -37,9 +37,6 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  product(uintx, G1YoungGenSize, 0,                                         \
-          "Size of the G1 young generation, 0 is the adaptive policy")      \
-                                                                            \
   develop(bool, G1Gen, true,                                                \
           "If true, it will enable the generational G1")                    \
                                                                             \
@@ -70,7 +67,7 @@
   develop(intx, G1PausesBtwnConcMark, -1,                                   \
           "If positive, fixed number of pauses between conc markings")      \
                                                                             \
-  diagnostic(bool, G1SummarizeConcurrentMark, false,                        \
+  diagnostic(bool, G1SummarizeConcMark, false,                              \
           "Summarize concurrent mark info")                                 \
                                                                             \
   diagnostic(bool, G1SummarizeRSetStats, false,                             \
@@ -85,12 +82,9 @@
   diagnostic(bool, G1SummarizeZFStats, false,                               \
           "Summarize zero-filling info")                                    \
                                                                             \
-  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
+  diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
-  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
-          "Size of the mark stack for concurrent marking.")                 \
-                                                                            \
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
@@ -100,20 +94,13 @@
   develop(intx, G1ConcZFMaxRegions, 1,                                      \
           "Stop zero-filling when # of zf'd regions reaches")               \
                                                                             \
-  product(intx, G1SteadyStateUsed, 90,                                      \
-          "If non-0, try to maintain 'used' at this pct (of max)")          \
-                                                                            \
-  product(intx, G1SteadyStateUsedDelta, 30,                                 \
-          "If G1SteadyStateUsed is non-0, then do pause this number of "    \
-          "of percentage points earlier if no marking is in progress.")     \
-                                                                            \
   develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
           "If true, count frac of ptr writes with null pre-vals.")          \
                                                                             \
-  product(intx, G1SATBLogBufferSize, 1*K,                                   \
+  product(intx, G1SATBBufferSize, 1*K,                                      \
           "Number of entries in an SATB log buffer.")                       \
                                                                             \
-  product(intx, G1SATBProcessCompletedThreshold, 20,                        \
+  develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
           "Number of completed buffers that triggers log processing.")      \
                                                                             \
   develop(intx, G1ExtraRegionSurvRate, 33,                                  \
@@ -127,7 +114,7 @@
   develop(bool, G1SATBPrintStubs, false,                                    \
           "If true, print generated stubs for the SATB barrier")            \
                                                                             \
-  product(intx, G1ExpandByPercentOfAvailable, 20,                           \
+  experimental(intx, G1ExpandByPercentOfAvailable, 20,                      \
           "When expanding, % of uncommitted space to claim.")               \
                                                                             \
   develop(bool, G1RSBarrierRegionFilter, true,                              \
@@ -165,36 +152,36 @@
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
-  product(intx, G1ConcRefineYellowZone, 0,                                  \
+  product(intx, G1ConcRefinementYellowZone, 0,                              \
           "Number of enqueued update buffers that will "                    \
           "trigger concurrent processing. Will be selected ergonomically "  \
           "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineRedZone, 0,                                     \
+  product(intx, G1ConcRefinementRedZone, 0,                                 \
           "Maximum number of enqueued update buffers before mutator "       \
          "threads start processing new ones instead of enqueueing them. "  \
          "Will be selected ergonomically by default. Zero will disable "   \
          "concurrent processing.")                                         \
                                                                             \
-  product(intx, G1ConcRefineGreenZone, 0,                                   \
+  product(intx, G1ConcRefinementGreenZone, 0,                               \
          "The number of update buffers that are left in the queue by the " \
          "concurrent processing threads. Will be selected ergonomically "  \
          "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineServiceInterval, 300,                           \
+  product(intx, G1ConcRefinementServiceIntervalMillis, 300,                 \
          "The last concurrent refinement thread wakes up every "           \
          "specified number of milliseconds to do miscellaneous work.")     \
                                                                             \
-  product(intx, G1ConcRefineThresholdStep, 0,                               \
+  product(intx, G1ConcRefinementThresholdStep, 0,                           \
          "Each time the rset update queue increases by this amount "       \
          "activate the next refinement thread if available. "              \
          "Will be selected ergonomically by default.")                     \
                                                                             \
-  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
+  product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
          "A target percentage of time that is allowed to be spend on "     \
          "process RS update buffers during the collection pause.")         \
                                                                             \
-  product(bool, G1AdaptiveConcRefine, true,                                 \
+  product(bool, G1UseAdaptiveConcRefinement, true,                          \
          "Select green, yellow and red zones adaptively to meet the "      \
          "the pause requirements.")                                        \
                                                                             \
@@ -245,15 +232,15 @@
          "the number of regions for which we'll print a surv rate "        \
          "summary.")                                                       \
                                                                             \
-  product(bool, G1UseScanOnlyPrefix, false,                                 \
+  develop(bool, G1UseScanOnlyPrefix, false,                                 \
          "It determines whether the system will calculate an optimum "     \
          "scan-only set.")                                                 \
                                                                             \
-  product(intx, G1MinReservePercent, 10,                                    \
+  product(intx, G1ReservePercent, 10,                                       \
          "It determines the minimum reserve we should have in the heap "   \
          "to minimize the probability of promotion failure.")              \
                                                                             \
-  diagnostic(bool, G1PrintRegions, false,                                   \
+  diagnostic(bool, G1PrintHeapRegions, false,                               \
          "If set G1 will print information on which regions are being "    \
          "allocated and which are reclaimed.")                             \
                                                                             \
@@ -263,9 +250,6 @@
   develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
          "Forces flushing of log buffers before verification.")            \
                                                                             \
-  product(bool, G1UseSurvivorSpaces, true,                                  \
-         "When true, use survivor space.")                                 \
-                                                                            \
   develop(bool, G1FailOnFPError, false,                                     \
          "When set, G1 will fail when it encounters an FP 'error', "       \
          "so as to allow debugging")                                       \
@@ -280,21 +264,21 @@
          "If non-0 is the size of the G1 survivor space, "                 \
          "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
-  product(bool, G1ForgetfulMMUTracker, false,                               \
+  product(bool, G1UseFixedWindowMMUTracker, false,                          \
          "If the MMU tracker's memory is full, forget the oldest entry")   \
                                                                             \
   product(uintx, G1HeapRegionSize, 0,                                       \
          "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetUpdating, false,                      \
          "Enables the parallelization of remembered set updating "         \
         "during evacuation pauses")                                       \
                                                                             \
-  experimental(bool, G1ParallelRSetScanningEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetScanning, false,                      \
         "Enables the parallelization of remembered set scanning "         \
        "during evacuation pauses")                                       \
                                                                             \
-  product(uintx, G1ParallelRSetThreads, 0,                                  \
+  product(uintx, G1ConcRefinementThreads, 0,                                \
        "If non-0 is the number of parallel rem set update threads, "     \
        "otherwise the value is determined ergonomically.")               \
                                                                             \
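
g1_globals.hpp is an X-macro table: each row above (product, diagnostic, experimental, develop) is a call to a macro supplied by the includer, which is how one table can expand into declarations, definitions, and flag-parsing entries. A toy two-row version with hypothetical flag names:

    #include <cstdio>

    // Two rows in the style of the table above; the row kinds are passed in.
    #define MY_FLAGS(product, develop)                                     \
      product(int,  ExampleBufferSize, 256,   "Size of an example buffer") \
      develop(bool, ExampleTrace,      false, "Trace example activity")

    // One possible expansion: define a variable per flag.
    #define DECLARE_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)
    #undef DECLARE_FLAG

    int main() {
      std::printf("ExampleBufferSize=%d ExampleTrace=%d\n",
                  ExampleBufferSize, (int)ExampleTrace);
      return 0;
    }

Moving a flag between row kinds, as several hunks above do, changes only which macro processes the row, e.g. whether the flag is settable in product builds.
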
@@ -25,8 +25,8 @@
 # include "incls/_precompiled.incl"
 # include "incls/_ptrQueue.cpp.incl"

-PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
-  _qset(qset_), _buf(NULL), _index(0), _active(false),
+PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm, bool active) :
+  _qset(qset_), _buf(NULL), _index(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
@@ -62,7 +62,7 @@ protected:
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
-  PtrQueue(PtrQueueSet*, bool perm = false);
+  PtrQueue(PtrQueueSet*, bool perm = false, bool active = false);
   // Release any contained resources.
   void flush();
   // Calls flush() when destroyed.
@@ -101,6 +101,8 @@ public:
     }
   }

+  bool is_active() { return _active; }
+
   static int byte_index_to_index(int ind) {
     assert((ind % oopSize) == 0, "Invariant.");
     return ind / oopSize;
@@ -257,7 +259,7 @@ public:
   bool process_completed_buffers() { return _process_completed; }
   void set_process_completed(bool x) { _process_completed = x; }

-  bool active() { return _all_active; }
+  bool is_active() { return _all_active; }

   // Set the buffer size. Should be called before any "enqueue" operation
   // can be called. And should only be called once.
@@ -82,9 +82,57 @@ void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
   t->satb_mark_queue().handle_zero_index();
 }

-void SATBMarkQueueSet::set_active_all_threads(bool b) {
+#ifdef ASSERT
+void SATBMarkQueueSet::dump_active_values(JavaThread* first,
+                                          bool expected_active) {
+  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
+  gclog_or_tty->print_cr(" SATB queue set: active is %s",
+                         (is_active()) ? "TRUE" : "FALSE");
+  gclog_or_tty->print_cr(" expected_active is %s",
+                         (expected_active) ? "TRUE" : "FALSE");
+  for (JavaThread* t = first; t; t = t->next()) {
+    bool active = t->satb_mark_queue().is_active();
+    gclog_or_tty->print_cr("  thread %s, active is %s",
+                           t->name(), (active) ? "TRUE" : "FALSE");
+  }
+}
+#endif // ASSERT
+
+void SATBMarkQueueSet::set_active_all_threads(bool b,
+                                              bool expected_active) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  JavaThread* first = Threads::first();
+
+#ifdef ASSERT
+  if (_all_active != expected_active) {
+    dump_active_values(first, expected_active);
+
+    // I leave this here as a guarantee, instead of an assert, so
+    // that it will still be compiled in if we choose to uncomment
+    // the #ifdef ASSERT in a product build. The whole block is
+    // within an #ifdef ASSERT so the guarantee will not be compiled
+    // in a product build anyway.
+    guarantee(false,
+              "SATB queue set has an unexpected active value");
+  }
+#endif // ASSERT
   _all_active = b;
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
+
+  for (JavaThread* t = first; t; t = t->next()) {
+#ifdef ASSERT
+    bool active = t->satb_mark_queue().is_active();
+    if (active != expected_active) {
+      dump_active_values(first, expected_active);
+
+      // I leave this here as a guarantee, instead of an assert, so
+      // that it will still be compiled in if we choose to uncomment
+      // the #ifdef ASSERT in a product build. The whole block is
+      // within an #ifdef ASSERT so the guarantee will not be compiled
+      // in a product build anyway.
+      guarantee(false,
+                "thread has an unexpected active value in its SATB queue");
+    }
+#endif // ASSERT
     t->satb_mark_queue().set_active(b);
   }
 }
@@ -29,8 +29,7 @@ class JavaThread;
 class ObjPtrQueue: public PtrQueue {
 public:
   ObjPtrQueue(PtrQueueSet* qset_, bool perm = false) :
-    PtrQueue(qset_, perm)
-  {}
+    PtrQueue(qset_, perm, qset_->is_active()) { }
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.
   void apply_closure(ObjectClosure* cl);
@@ -55,6 +54,9 @@ class SATBMarkQueueSet: public PtrQueueSet {
   // is ignored.
   bool apply_closure_to_completed_buffer_work(bool par, int worker);

+#ifdef ASSERT
+  void dump_active_values(JavaThread* first, bool expected_active);
+#endif // ASSERT
+
 public:
   SATBMarkQueueSet();

@@ -65,9 +67,11 @@ public:
   static void handle_zero_index_for_thread(JavaThread* t);

-  // Apply "set_active(b)" to all thread tloq's. Should be called only
-  // with the world stopped.
-  void set_active_all_threads(bool b);
+  // Apply "set_active(b)" to all Java threads' SATB queues. It should be
+  // called only with the world stopped. The method will assert that the
+  // SATB queues of all threads it visits, as well as the SATB queue
+  // set itself, have an active value equal to expected_active.
+  void set_active_all_threads(bool b, bool expected_active);

   // Register "blk" as "the closure" for all queues. Only one such closure
   // is allowed. The "apply_closure_to_completed_buffer" method will apply
@@ -175,6 +175,7 @@ psAdaptiveSizePolicy.hpp gcUtil.hpp
 psAdaptiveSizePolicy.hpp                adaptiveSizePolicy.hpp

 psCompactionManager.cpp                 gcTaskManager.hpp
+psCompactionManager.cpp                 objArrayKlass.inline.hpp
 psCompactionManager.cpp                 objectStartArray.hpp
 psCompactionManager.cpp                 oop.hpp
 psCompactionManager.cpp                 oop.inline.hpp
@@ -189,6 +190,9 @@ psCompactionManager.cpp systemDictionary.hpp
 psCompactionManager.hpp                 allocation.hpp
 psCompactionManager.hpp                 taskqueue.hpp

+psCompactionManager.inline.hpp          psCompactionManager.hpp
+psCompactionManager.inline.hpp          psParallelCompact.hpp
+
 psGCAdaptivePolicyCounters.hpp          gcAdaptivePolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          gcPolicyCounters.hpp
 psGCAdaptivePolicyCounters.hpp          psAdaptiveSizePolicy.hpp
@@ -379,12 +383,12 @@ pcTasks.cpp fprofiler.hpp
 pcTasks.cpp                             jniHandles.hpp
 pcTasks.cpp                             jvmtiExport.hpp
 pcTasks.cpp                             management.hpp
+pcTasks.cpp                             objArrayKlass.inline.hpp
 pcTasks.cpp                             psParallelCompact.hpp
 pcTasks.cpp                             pcTasks.hpp
 pcTasks.cpp                             oop.inline.hpp
 pcTasks.cpp                             oop.pcgc.inline.hpp
 pcTasks.cpp                             systemDictionary.hpp
-pcTasks.cpp                             taskqueue.hpp
 pcTasks.cpp                             thread.hpp
 pcTasks.cpp                             universe.hpp
 pcTasks.cpp                             vmThread.hpp
@@ -48,7 +48,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
   _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);

   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
 }

@@ -118,7 +118,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   }

   // Do the real work
-  cm->drain_marking_stacks(&mark_and_push_closure);
+  cm->follow_marking_stacks();
   // cm->deallocate_stacks();
 }

@@ -196,17 +196,19 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

   oop obj = NULL;
+  ObjArrayTask task;
   int random_seed = 17;
-  while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, obj)) {
-      obj->follow_contents(cm);
-      cm->drain_marking_stacks(&mark_and_push_closure);
-    } else {
-      if (terminator()->offer_termination()) {
-        break;
-      }
-    }
-  }
+  do {
+    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(cm, task.obj(), task.index());
+      cm->follow_marking_stacks();
+    }
+
+    while (ParCompactionManager::steal(which, &random_seed, obj)) {
+      obj->follow_contents(cm);
+      cm->follow_marking_stacks();
+    }
+  } while (!terminator()->offer_termination());
 }

 //
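
The rewritten StealMarkingTask loop drains both stealable sources before offering termination, and repeats until every worker agrees there is no work left. Its shape, with single-threaded stand-ins for the steal sources and the terminator:

    #include <cstdio>

    static int objarray_tasks = 3;   // pretend work counters
    static int oop_tasks = 5;

    static bool steal_objarray(int& t) {
      if (objarray_tasks == 0) return false;
      t = objarray_tasks--;
      return true;
    }
    static bool steal_oop(int& t) {
      if (oop_tasks == 0) return false;
      t = oop_tasks--;
      return true;
    }
    static bool offer_termination() {   // all workers idle and queues empty
      return objarray_tasks == 0 && oop_tasks == 0;
    }

    int main() {
      int task;
      do {
        while (steal_objarray(task)) { std::printf("objarray chunk %d\n", task); }
        while (steal_oop(task))      { std::printf("oop %d\n", task); }
      } while (!offer_termination());
      return 0;
    }
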
@@ -28,6 +28,8 @@
 PSOldGen*               ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
+ParCompactionManager::ObjArrayTaskQueueSet*
+  ParCompactionManager::_objarray_queues = NULL;
 ObjectStartArray*       ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
 RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;
@@ -46,6 +48,11 @@ ParCompactionManager::ParCompactionManager() :
   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);

+  _objarray_queue.initialize();
+  _objarray_overflow_stack =
+    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
+
 #ifdef USE_RegionTaskQueueWithOverflow
   region_stack()->initialize();
 #else
@@ -69,6 +76,7 @@ ParCompactionManager::ParCompactionManager() :
 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
+  delete _objarray_overflow_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
@@ -86,18 +94,21 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
+  guarantee(_manager_array != NULL, "Could not allocate manager_array");

   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_stack_array != NULL, "Count not initialize promotion manager");
+  guarantee(_stack_array != NULL, "Could not allocate stack_array");
+  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
+  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
   _region_array = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_array != NULL, "Count not initialize promotion manager");
+  guarantee(_region_array != NULL, "Could not allocate region_array");

   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
+    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
 #ifdef USE_RegionTaskQueueWithOverflow
     region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
@@ -203,36 +214,30 @@ void ParCompactionManager::reset() {
   }
 }

-void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  MutableSpace* to_space = heap->young_gen()->to_space();
-  MutableSpace* old_space = heap->old_gen()->object_space();
-  MutableSpace* perm_space = heap->perm_gen()->object_space();
-#endif /* ASSERT */
-
+void ParCompactionManager::follow_marking_stacks() {
   do {
-    // Drain overflow stack first, so other threads can steal from
-    // claimed stack while we work.
-    while(!overflow_stack()->is_empty()) {
-      oop obj = overflow_stack()->pop();
-      obj->follow_contents(this);
-    }
-
-    oop obj;
-    // obj is a reference!!!
-    while (marking_stack()->pop_local(obj)) {
-      // It would be nice to assert about the type of objects we might
-      // pop, but they can come from anywhere, unfortunately.
-      obj->follow_contents(this);
-    }
-  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
-
-  assert(marking_stack()->size() == 0, "Sanity");
-  assert(overflow_stack()->length() == 0, "Sanity");
+    // Drain the overflow stack first, to allow stealing from the marking stack.
+    oop obj;
+    while (!overflow_stack()->is_empty()) {
+      overflow_stack()->pop()->follow_contents(this);
+    }
+    while (marking_stack()->pop_local(obj)) {
+      obj->follow_contents(this);
+    }
+
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    ObjArrayTask task;
+    if (!_objarray_overflow_stack->is_empty()) {
+      task = _objarray_overflow_stack->pop();
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    } else if (_objarray_queue.pop_local(task)) {
+      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
+      k->oop_follow_contents(this, task.obj(), task.index());
+    }
+  } while (!marking_stacks_empty());
+
+  assert(marking_stacks_empty(), "Sanity");
 }

 void ParCompactionManager::drain_region_overflow_stack() {
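
follow_marking_stacks() deliberately pops at most one ObjArray task per iteration of the outer loop: each task covers a slice of a large object array and may re-enqueue a task for the remainder, so big arrays are walked in bounded chunks instead of flooding the marking stack. A toy single-threaded version of that chunking (chunk size and task layout are invented):

    #include <cstddef>
    #include <vector>

    struct ObjArrayTask { std::size_t index, len; };

    static const std::size_t kChunk = 4;           // illustrative chunk size
    static std::vector<ObjArrayTask> queue;

    static void follow_array_chunk(ObjArrayTask t) {
      std::size_t end = (t.index + kChunk < t.len) ? t.index + kChunk : t.len;
      // ... mark elements [t.index, end) here ...
      if (end < t.len) {
        queue.push_back(ObjArrayTask{end, t.len}); // re-enqueue the remainder
      }
    }

    int main() {
      queue.push_back(ObjArrayTask{0, 10});        // a 10-element "array"
      while (!queue.empty()) {                     // one bounded chunk at a time
        ObjArrayTask t = queue.back();
        queue.pop_back();
        follow_array_chunk(t);
      }
      return 0;
    }
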
@@ -22,18 +22,6 @@
  *
  */

-//
-// psPromotionManager is used by a single thread to manage object survival
-// during a scavenge. The promotion manager contains thread local data only.
-//
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
-// to use a promotion manager in more than one thread, the stacks MUST be
-// on cheap. This can lead to memory leaks, though, as they are not auto
-// deallocated.
-//
-// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
-//
-
 // Move to some global location
 #define HAS_BEEN_MOVED 0x1501d01d
 // End move to some global location
@@ -46,8 +34,6 @@ class ObjectStartArray;
 class ParallelCompactData;
 class ParMarkBitMap;

-// Move to it's own file if this works out.
-
 class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
@@ -72,14 +58,27 @@ class ParCompactionManager : public CHeapObj {
 // ------------------------  End don't putback if not needed

  private:
+  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
+  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
+  #undef OBJARRAY_QUEUE_SIZE
+
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
+  static ObjArrayTaskQueueSet*  _objarray_queues;
   static ObjectStartArray*      _start_array;
   static RegionTaskQueueSet*    _region_array;
   static PSOldGen*              _old_gen;

+ private:
   OopTaskQueue                  _marking_stack;
   GrowableArray<oop>*           _overflow_stack;
+
+  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
+  ObjArrayTaskQueue             _objarray_queue;
+  ObjArrayOverflowStack*        _objarray_overflow_stack;
+
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
@@ -128,8 +127,8 @@ class ParCompactionManager : public CHeapObj {
   // Pushes onto the region stack.  If the region stack is full,
   // pushes onto the region overflow stack.
   void region_stack_push(size_t region_index);
- public:

+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }

@@ -163,6 +162,8 @@ class ParCompactionManager : public CHeapObj {
   // Get a oop for scanning.  If returns null, no oop were found.
   oop retrieve_for_scanning();

+  inline void push_objarray(oop obj, size_t index);
+
   // Save region for later processing.  Must not fail.
   void save_for_processing(size_t region_index);
   // Get a region for processing.  If returns null, no region were found.
@@ -175,12 +176,17 @@ class ParCompactionManager : public CHeapObj {
     return stack_array()->steal(queue_num, seed, t);
   }

+  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
+    return _objarray_queues->steal(queue_num, seed, t);
+  }
+
   static bool steal(int queue_num, int* seed, RegionTask& t) {
     return region_array()->steal(queue_num, seed, t);
   }

-  // Process tasks remaining on any stack
-  void drain_marking_stacks(OopClosure *blk);
+  // Process tasks remaining on any marking stack
+  void follow_marking_stacks();
+  inline bool marking_stacks_empty() const;

   // Process tasks remaining on any stack
   void drain_region_stacks();

@@ -200,3 +206,8 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
             "out of range manager_array access");
   return _manager_array[index];
 }
+
+bool ParCompactionManager::marking_stacks_empty() const {
+  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
+    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+}
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+void ParCompactionManager::push_objarray(oop obj, size_t index)
+{
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  if (!_objarray_queue.push(task)) {
+    _objarray_overflow_stack->push(task);
+  }
+}
@@ -479,6 +479,7 @@ void PSMarkSweep::allocate_stacks() {
   _preserved_oop_stack = NULL;

   _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
+  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
@@ -497,6 +498,7 @@ void PSMarkSweep::deallocate_stacks() {
   }

   delete _marking_stack;
+  delete _objarray_stack;
   delete _revisit_klass_stack;
   delete _revisit_mdo_stack;
 }
@@ -785,7 +785,7 @@ PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closu
 void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
 void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

-void PSParallelCompact::FollowStackClosure::do_void() { follow_stack(_compaction_manager); }
+void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }

 void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(_compaction_manager, p); }
 void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
@@ -2376,7 +2376,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
     // Follow code cache roots.
     CodeCache::do_unloading(is_alive_closure(), &mark_and_push_closure,
                             purged_class);
-    follow_stack(cm); // Flush marking stack.
+    cm->follow_marking_stacks(); // Flush marking stack.

     // Update subklass/sibling/implementor links of live klasses
     // revisit_klass_stack is used in follow_weak_klass_links().
@@ -2389,8 +2389,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
     SymbolTable::unlink(is_alive_closure());
     StringTable::unlink(is_alive_closure());

-  assert(cm->marking_stack()->size() == 0, "stack should be empty by now");
-  assert(cm->overflow_stack()->is_empty(), "stack should be empty by now");
+  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }

 // This should be moved to the shared markSweep code!
@@ -2709,22 +2708,6 @@ void PSParallelCompact::compact_serial(ParCompactionManager* cm) {
   young_gen->move_and_update(cm);
 }

-void PSParallelCompact::follow_stack(ParCompactionManager* cm) {
-  while(!cm->overflow_stack()->is_empty()) {
-    oop obj = cm->overflow_stack()->pop();
-    obj->follow_contents(cm);
-  }
-
-  oop obj;
-  // obj is a reference!!!
-  while (cm->marking_stack()->pop_local(obj)) {
-    // It would be nice to assert about the type of objects we might
-    // pop, but they can come from anywhere, unfortunately.
-    obj->follow_contents(cm);
-  }
-}
-
 void
 PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
@@ -2745,7 +2728,7 @@ PSParallelCompact::follow_weak_klass_links() {
                                 &keep_alive_closure);
     }
     // revisit_klass_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }

@@ -2776,7 +2759,7 @@ void PSParallelCompact::follow_mdo_weak_refs() {
       rms->at(j)->follow_weak_refs(is_alive_closure());
     }
     // revisit_mdo_stack is cleared in reset()
-    follow_stack(cm);
+    cm->follow_marking_stacks();
   }
 }

Some files were not shown because too many files have changed in this diff Show more