commit 9e8f860fec
Zhengyu Gu, 2013-09-13 10:34:15 -04:00
127 changed files with 3399 additions and 710 deletions

==== changed file ====

@@ -227,3 +227,4 @@ bbe43d712fe08e650808d774861b256ccb34e500 jdk8-b102
 30a1d677a20c6a95f98043d8f20ce570304e3818 jdk8-b103
 b5ed503c26ad38869c247c5e32debec217fd056b jdk8-b104
 589f4fdc584e373a47cde0162e9eceec9165c381 jdk8-b105
+514b0b69fb9683ef52062fd962a3e0644431f64d jdk8-b106

==== changed file ====

@@ -227,3 +227,4 @@ d2dcb110e9dbaf9903c05b211df800e78e4b394e jdk8-b100
 b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
 96c1b9b7524b52c3fcefc90ffad4c767396727c8 jdk8-b104
 5166118c59178b5d31001bc4058e92486ee07d9b jdk8-b105
+8e7b4d9fb00fdf1334376aeac050c9bca6d1b383 jdk8-b106

==== changed file ====

@@ -3818,7 +3818,7 @@ fi
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1377850299
+DATE_WHEN_GENERATED=1378914658
 
 ###############################################################################
 #
@@ -6775,6 +6775,18 @@ test -n "$target_alias" &&
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=big
       ;;
+    s390)
+      VAR_CPU=s390
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=32
+      VAR_CPU_ENDIAN=big
+      ;;
+    s390x)
+      VAR_CPU=s390x
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=big
+      ;;
     sparc)
       VAR_CPU=sparc
       VAR_CPU_ARCH=sparc
@@ -6883,6 +6895,18 @@ $as_echo "$OPENJDK_BUILD_OS-$OPENJDK_BUILD_CPU" >&6; }
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=big
       ;;
+    s390)
+      VAR_CPU=s390
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=32
+      VAR_CPU_ENDIAN=big
+      ;;
+    s390x)
+      VAR_CPU=s390x
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=big
+      ;;
     sparc)
       VAR_CPU=sparc
       VAR_CPU_ARCH=sparc

==== changed file ====

@@ -60,6 +60,18 @@ AC_DEFUN([PLATFORM_EXTRACT_VARS_FROM_CPU],
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=big
       ;;
+    s390)
+      VAR_CPU=s390
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=32
+      VAR_CPU_ENDIAN=big
+      ;;
+    s390x)
+      VAR_CPU=s390x
+      VAR_CPU_ARCH=s390
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=big
+      ;;
     sparc)
      VAR_CPU=sparc
      VAR_CPU_ARCH=sparc

==== changed file ====

@@ -227,3 +227,4 @@ a013024b07475782f1fa8e196e950b34b4077663 jdk8-b101
 49c4a777fdfd648d4c3fffc940fdb97a23108ca8 jdk8-b103
 d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
 4e38de7c767e34104fa147b5b346d9fe6b731279 jdk8-b105
+2e3a056c84a71eba78945c18b05397858ffd7ad0 jdk8-b106

==== changed file ====

@@ -373,3 +373,5 @@ c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
 c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47
 acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
 18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
+aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
+50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49

==== changed file ====

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=49
+HS_BUILD_NUMBER=50
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8

==== changed file ====

@@ -307,7 +307,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
       assert(a_byte == *start++, "should be the same code");
     }
 #endif
-  } else if (_id == load_mirror_id) {
+  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     // produce a copy of the load mirror instruction for use by the being initialized case
 #ifdef ASSERT
     address start = __ pc();
@@ -384,6 +384,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -397,7 +398,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(_info);
   __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
   __ delayed()->nop();
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     address pc = (address)_pc_start;
     RelocIterator iter(cs, pc, pc + 1);

==== changed file ====

@@ -520,7 +520,7 @@ void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
   // Allocate a new index in table to hold the object once it's been patched
   int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
 
   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");

==== changed file ====

@@ -804,6 +804,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
+    case load_appendix_patching_id:
+      { __ set_info("load_appendix_patching", dont_gc_arguments);
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);

==== changed file ====

@@ -57,6 +57,7 @@ define_pd_global(intx, RegisterCostAreaRatio, 12000);
 define_pd_global(bool, UseTLAB, true);
 define_pd_global(bool, ResizeTLAB, true);
 define_pd_global(intx, LoopUnrollLimit, 60); // Design center runs on 1.3.1
+define_pd_global(intx, MinJumpTableSize, 5);
 
 // Peephole and CISC spilling both break the graph, and so makes the
 // scheduler sick.

==== changed file ====

@@ -402,6 +402,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
     case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
     case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
     case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -419,7 +420,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
     __ nop();
   }
-  if (_id == load_klass_id || _id == load_mirror_id) {
+  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     CodeSection* cs = __ code_section();
     RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
     relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);

==== changed file ====

@@ -362,7 +362,8 @@ int LIR_Assembler::check_icache() {
 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
   jobject o = NULL;
-  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
+  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
   __ movoop(reg, o);
   patching_epilog(patch, lir_patch_normal, reg, info);
 }

==== changed file ====

@@ -1499,6 +1499,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
+    case load_appendix_patching_id:
+      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
+        // we should set up register map
+        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
+      }
+      break;
+
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);

==== changed file ====

@@ -30,7 +30,6 @@
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
-
 define_pd_global(bool, BackgroundCompilation, true);
 define_pd_global(bool, UseTLAB, true);
 define_pd_global(bool, ResizeTLAB, true);
@@ -52,6 +51,7 @@ define_pd_global(intx, OnStackReplacePercentage, 140);
 define_pd_global(intx, ConditionalMoveLimit, 3);
 define_pd_global(intx, FLOATPRESSURE, 6);
 define_pd_global(intx, FreqInlineSize, 325);
+define_pd_global(intx, MinJumpTableSize, 10);
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE, 13);
 define_pd_global(intx, InteriorEntryAlignment, 16);

==== changed file ====

@@ -2767,7 +2767,19 @@ void os::numa_make_global(char *addr, size_t bytes) {
   Linux::numa_interleave_memory(addr, bytes);
 }
 
+// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
+// bind policy to MPOL_PREFERRED for the current thread.
+#define USE_MPOL_PREFERRED 0
+
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+  // To make NUMA and large pages more robust when both enabled, we need to ease
+  // the requirements on where the memory should be allocated. MPOL_BIND is the
+  // default policy and it will force memory to be allocated on the specified
+  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
+  // the specified node, but will not force it. Using this policy will prevent
+  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
+  // free large pages.
+  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
   Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
 }
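A minimal standalone sketch of the same pattern the patch relies on: resolving
numa_set_bind_policy from libnuma at runtime and relaxing the policy before
binding memory to a node. Assumptions: libnuma.so.1 is installed and exports
numa_set_bind_policy(int), where 0 selects MPOL_PREFERRED and 1 selects MPOL_BIND.

  #include <dlfcn.h>
  #include <cstdio>

  typedef void (*numa_set_bind_policy_func_t)(int policy);

  int main() {
    // Load libnuma dynamically, mirroring os::Linux::libnuma_init() above.
    void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
    if (handle == NULL) {
      fprintf(stderr, "libnuma not available\n");
      return 1;
    }
    numa_set_bind_policy_func_t set_policy =
        (numa_set_bind_policy_func_t) dlsym(handle, "numa_set_bind_policy");
    if (set_policy != NULL) {
      set_policy(0);  // 0 = MPOL_PREFERRED: prefer the node, do not force it
    }
    dlclose(handle);
    return 0;
  }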
@@ -2869,6 +2881,8 @@ bool os::Linux::libnuma_init() {
                                        libnuma_dlsym(handle, "numa_tonode_memory")));
       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                 libnuma_dlsym(handle, "numa_interleave_memory")));
+      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
+                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
 
       if (numa_available() != -1) {
@@ -2935,6 +2949,7 @@ os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
 os::Linux::numa_available_func_t os::Linux::_numa_available;
 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 
 bool os::pd_uncommit_memory(char* addr, size_t size) {

==== changed file ====

@@ -235,6 +235,7 @@ private:
   typedef int (*numa_available_func_t)(void);
   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
+  typedef void (*numa_set_bind_policy_func_t)(int policy);
 
   static sched_getcpu_func_t _sched_getcpu;
   static numa_node_to_cpus_func_t _numa_node_to_cpus;
@@ -242,6 +243,7 @@ private:
   static numa_available_func_t _numa_available;
   static numa_tonode_memory_func_t _numa_tonode_memory;
   static numa_interleave_memory_func_t _numa_interleave_memory;
+  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   static unsigned long* _numa_all_nodes;
 
   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
@@ -250,6 +252,7 @@ private:
   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static int sched_getcpu_syscall(void);
 public:
@@ -267,6 +270,11 @@ public:
       _numa_interleave_memory(start, size, _numa_all_nodes);
     }
   }
+  static void numa_set_bind_policy(int policy) {
+    if (_numa_set_bind_policy != NULL) {
+      _numa_set_bind_policy(policy);
+    }
+  }
   static int get_node_by_cpu(int cpu_id);
 };

==== changed file ====

@@ -44,6 +44,6 @@ define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
 define_pd_global(intx, CompilerThreadStackSize, 0);
 
 // Used on 64 bit platforms for UseCompressedOops base address
-define_pd_global(uintx,HeapBaseMinAddress, 256*M);
+define_pd_global(uintx,HeapBaseMinAddress, 2*G);
 
 #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP

==== changed file ====

@@ -106,10 +106,12 @@ public class CallSite {
                         " (" + getMethod().getBytes() + " bytes) " + getReason());
       }
     }
+    stream.printf(" (end time: %6.4f", getTimeStamp());
     if (getEndNodes() > 0) {
-      stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
+      stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
     }
-    stream.println("");
+    stream.println(")");
     if (getReceiver() != null) {
       emit(stream, indent + 4);
       // stream.println("type profile " + method.holder + " -> " + receiver + " (" +

==== changed file ====

@@ -207,7 +207,12 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
   }
 
   String search(Attributes attr, String name) {
-    return search(attr, name, null);
+    String result = attr.getValue(name);
+    if (result != null) {
+      return result;
+    } else {
+      throw new InternalError("can't find " + name);
+    }
   }
 
   String search(Attributes attr, String name, String defaultValue) {
@@ -215,13 +220,7 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
     if (result != null) {
       return result;
     }
-    if (defaultValue != null) {
-      return defaultValue;
-    }
-    for (int i = 0; i < attr.getLength(); i++) {
-      System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
-    }
-    throw new InternalError("can't find " + name);
+    return defaultValue;
   }
 
   int indent = 0;
@@ -268,17 +267,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
       Phase p = new Phase(search(atts, "name"),
                           Double.parseDouble(search(atts, "stamp")),
                           Integer.parseInt(search(atts, "nodes", "0")),
-                          Integer.parseInt(search(atts, "live")));
+                          Integer.parseInt(search(atts, "live", "0")));
       phaseStack.push(p);
     } else if (qname.equals("phase_done")) {
       Phase p = phaseStack.pop();
-      if (! p.getId().equals(search(atts, "name"))) {
+      String phaseName = search(atts, "name", null);
+      if (phaseName != null && !p.getId().equals(phaseName)) {
         System.out.println("phase: " + p.getId());
         throw new InternalError("phase name mismatch");
       }
       p.setEnd(Double.parseDouble(search(atts, "stamp")));
       p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
-      p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
+      p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
       compile.getPhases().add(p);
     } else if (qname.equals("task")) {
       compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@@ -413,8 +413,8 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
       }
     } else if (qname.equals("parse_done")) {
       CallSite call = scopes.pop();
-      call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
-      call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
+      call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
+      call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
       call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
       scopes.push(call);
     }
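Net effect of the LogParser changes: the one-argument search() now throws as soon as an attribute is missing, the two-argument form simply returns its default (which may be null), and the "nodes"/"live" attributes consistently default to "0", so compilation logs that omit live-node counts still parse.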

==== changed file ====

@@ -1095,7 +1095,7 @@ static void check_peepmatch_instruction_sequence(FILE *fp, PeepMatch *pmatch, Pe
   fprintf(fp, "  // Identify previous instruction if inside this block\n");
   fprintf(fp, "  if( ");
   print_block_index(fp, inst_position);
-  fprintf(fp, " > 0 ) {\n    Node *n = block->_nodes.at(");
+  fprintf(fp, " > 0 ) {\n    Node *n = block->get_node(");
   print_block_index(fp, inst_position);
   fprintf(fp, ");\n    inst%d = (n->is_Mach()) ? ", inst_position);
   fprintf(fp, "n->as_Mach() : NULL;\n  }\n");

==== changed file ====

@@ -364,7 +364,8 @@ class PatchingStub: public CodeStub {
   enum PatchID {
     access_field_id,
     load_klass_id,
-    load_mirror_id
+    load_mirror_id,
+    load_appendix_id
   };
   enum constants {
     patch_info_size = 3
@@ -417,7 +418,7 @@ class PatchingStub: public CodeStub {
     }
     NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
     n_move->set_offset(field_offset);
-  } else if (_id == load_klass_id || _id == load_mirror_id) {
+  } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
     assert(_obj != noreg, "must have register object for load_klass/load_mirror");
 #ifdef ASSERT
     // verify that we're pointing at a NativeMovConstReg

==== changed file ====

@@ -74,16 +74,19 @@ class PhaseTraceTime: public TraceTime {
  private:
   JavaThread* _thread;
   CompileLog* _log;
+  TimerName _timer;
 
  public:
   PhaseTraceTime(TimerName timer)
-      : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
+      : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
+        _log(NULL), _timer(timer)
+  {
     if (Compilation::current() != NULL) {
       _log = Compilation::current()->log();
     }
 
     if (_log != NULL) {
-      _log->begin_head("phase name='%s'", timer_name[timer]);
+      _log->begin_head("phase name='%s'", timer_name[_timer]);
       _log->stamp();
       _log->end_head();
     }
@@ -91,7 +94,7 @@ class PhaseTraceTime: public TraceTime {
 
   ~PhaseTraceTime() {
     if (_log != NULL)
-      _log->done("phase");
+      _log->done("phase name='%s'", timer_name[_timer]);
   }
 };

==== changed file ====

@@ -1583,7 +1583,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
     ObjectType* obj_type = obj->type()->as_ObjectType();
     if (obj_type->is_constant() && !PatchALot) {
       ciObject* const_oop = obj_type->constant_value();
-      if (!const_oop->is_null_object()) {
+      if (!const_oop->is_null_object() && const_oop->is_loaded()) {
         if (field->is_constant()) {
           ciConstant field_val = field->constant_value_of(const_oop);
           BasicType field_type = field_val.basic_type();
@@ -1667,9 +1667,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   assert(declared_signature != NULL, "cannot be null");
 
-  // FIXME bail out for now
-  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
-    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
+  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
+    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   }
 
   // we have to make sure the argument size (incl. the receiver)
@@ -1713,10 +1712,23 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
       code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
       break;
     }
+  } else {
+    if (bc_raw == Bytecodes::_invokehandle) {
+      assert(!will_link, "should come here only for unlinked call");
+      code = Bytecodes::_invokespecial;
+    }
   }
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
-  if (stream()->has_appendix()) {
+  bool patch_for_appendix = false;
+  int patching_appendix_arg = 0;
+  if (C1PatchInvokeDynamic &&
+      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
+    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
+    apush(arg);
+    patch_for_appendix = true;
+    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
+  } else if (stream()->has_appendix()) {
     ciObject* appendix = stream()->get_appendix();
     Value arg = append(new Constant(new ObjectConstant(appendix)));
     apush(arg);
@@ -1732,7 +1744,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
       !(// %%% FIXME: Are both of these relevant?
         target->is_method_handle_intrinsic() ||
-        target->is_compiled_lambda_form())) {
+        target->is_compiled_lambda_form()) &&
+      !patch_for_appendix) {
     Value receiver = NULL;
     ciInstanceKlass* receiver_klass = NULL;
     bool type_is_exact = false;
@@ -1850,7 +1863,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
   // check if we could do inlining
   if (!PatchALot && Inline && klass->is_loaded() &&
       (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
-      && target->is_loaded()) {
+      && target->is_loaded()
+      && !patch_for_appendix) {
     // callee is known => check if we have static binding
     assert(target->is_loaded(), "callee must be known");
     if (code == Bytecodes::_invokestatic ||
@@ -1901,7 +1915,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
       code == Bytecodes::_invokespecial   ||
       code == Bytecodes::_invokevirtual   ||
       code == Bytecodes::_invokeinterface;
-  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
+  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   Value recv = has_receiver ? apop() : NULL;
   int vtable_index = Method::invalid_vtable_index;

==== changed file ====

@@ -1211,8 +1211,6 @@ class LIR_OpJavaCall: public LIR_OpCall {
   bool is_invokedynamic() const { return code() == lir_dynamic_call; }
   bool is_method_handle_invoke() const {
     return
-      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
-      ||
       method()->is_compiled_lambda_form()  // Java-generated adapter
       ||
       method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic

==== changed file ====

@@ -93,12 +93,23 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
       default:
         ShouldNotReachHere();
     }
+  } else if (patch->id() == PatchingStub::load_appendix_id) {
+    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
+    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   } else {
     ShouldNotReachHere();
   }
 #endif
 }
 
+PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
+  IRScope* scope = info->scope();
+  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
+  if (Bytecodes::has_optional_appendix(bc_raw)) {
+    return PatchingStub::load_appendix_id;
+  }
+  return PatchingStub::load_mirror_id;
+}
+
 //---------------------------------------------------------------
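In effect the new patching_id() helper selects the stub from the raw call-site bytecode: bytecodes that can carry an optional appendix (invokedynamic and invokehandle) map to load_appendix_id, while everything else keeps the existing load_mirror_id path.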

==== changed file ====

@@ -119,6 +119,8 @@ class LIR_Assembler: public CompilationResourceObj {
 
   void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
 
+  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
+
  public:
   LIR_Assembler(Compilation* c);
   ~LIR_Assembler();

==== changed file ====

@@ -819,6 +819,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
+  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
@@ -888,10 +889,32 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
             mirror = Handle(THREAD, m);
           }
           break;
-        default: Unimplemented();
+        default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
       }
       // convert to handle
       load_klass = KlassHandle(THREAD, k);
+    } else if (stub_id == load_appendix_patching_id) {
+      Bytecode_invoke bytecode(caller_method, bci);
+      Bytecodes::Code bc = bytecode.invoke_code();
+
+      CallInfo info;
+      constantPoolHandle pool(thread, caller_method->constants());
+      int index = bytecode.index();
+      LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
+      appendix = info.resolved_appendix();
+      switch (bc) {
+        case Bytecodes::_invokehandle: {
+          int cache_index = ConstantPool::decode_cpcache_index(index, true);
+          assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
+          pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
+          break;
+        }
+        case Bytecodes::_invokedynamic: {
+          pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
+          break;
+        }
+        default: fatal("unexpected bytecode for load_appendix_patching_id");
+      }
     } else {
       ShouldNotReachHere();
     }
@@ -992,8 +1015,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
              n_copy->data() == (intptr_t)Universe::non_oop_word(),
              "illegal init value");
       if (stub_id == Runtime1::load_klass_patching_id) {
-      assert(load_klass() != NULL, "klass not set");
-      n_copy->set_data((intx) (load_klass()));
+        assert(load_klass() != NULL, "klass not set");
+        n_copy->set_data((intx) (load_klass()));
       } else {
         assert(mirror() != NULL, "klass not set");
         n_copy->set_data((intx) (mirror()));
@@ -1002,43 +1025,55 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
         if (TracePatching) {
           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
         }
+      }
+    } else if (stub_id == Runtime1::load_appendix_patching_id) {
+      NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
+      assert(n_copy->data() == 0 ||
+             n_copy->data() == (intptr_t)Universe::non_oop_word(),
+             "illegal init value");
+      n_copy->set_data((intx) (appendix()));
 
-#if defined(SPARC) || defined(PPC)
-        // Update the location in the nmethod with the proper
-        // metadata. When the code was generated, a NULL was stuffed
-        // in the metadata table and that table needs to be update to
-        // have the right value. On intel the value is kept
-        // directly in the instruction instead of in the metadata
-        // table, so set_data above effectively updated the value.
-        nmethod* nm = CodeCache::find_nmethod(instr_pc);
-        assert(nm != NULL, "invalid nmethod_pc");
-        RelocIterator mds(nm, copy_buff, copy_buff + 1);
-        bool found = false;
-        while (mds.next() && !found) {
-          if (mds.type() == relocInfo::oop_type) {
-            assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
-            oop_Relocation* r = mds.oop_reloc();
-            oop* oop_adr = r->oop_addr();
-            *oop_adr = mirror();
-            r->fix_oop_relocation();
-            found = true;
-          } else if (mds.type() == relocInfo::metadata_type) {
-            assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
-            metadata_Relocation* r = mds.metadata_reloc();
-            Metadata** metadata_adr = r->metadata_addr();
-            *metadata_adr = load_klass();
-            r->fix_metadata_relocation();
-            found = true;
-          }
-        }
-        assert(found, "the metadata must exist!");
-#endif
+      if (TracePatching) {
+        Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
       }
     } else {
       ShouldNotReachHere();
     }
+
+#if defined(SPARC) || defined(PPC)
+    if (load_klass_or_mirror_patch_id ||
+        stub_id == Runtime1::load_appendix_patching_id) {
+      // Update the location in the nmethod with the proper
+      // metadata. When the code was generated, a NULL was stuffed
+      // in the metadata table and that table needs to be update to
+      // have the right value. On intel the value is kept
+      // directly in the instruction instead of in the metadata
+      // table, so set_data above effectively updated the value.
+      nmethod* nm = CodeCache::find_nmethod(instr_pc);
+      assert(nm != NULL, "invalid nmethod_pc");
+      RelocIterator mds(nm, copy_buff, copy_buff + 1);
+      bool found = false;
+      while (mds.next() && !found) {
+        if (mds.type() == relocInfo::oop_type) {
+          assert(stub_id == Runtime1::load_mirror_patching_id ||
+                 stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+          oop_Relocation* r = mds.oop_reloc();
+          oop* oop_adr = r->oop_addr();
+          *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
+          r->fix_oop_relocation();
+          found = true;
+        } else if (mds.type() == relocInfo::metadata_type) {
+          assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+          metadata_Relocation* r = mds.metadata_reloc();
+          Metadata** metadata_adr = r->metadata_addr();
+          *metadata_adr = load_klass();
+          r->fix_metadata_relocation();
+          found = true;
+        }
+      }
+      assert(found, "the metadata must exist!");
+    }
+#endif
     if (do_patch) {
       // replace instructions
       // first replace the tail, then the call
@@ -1077,7 +1112,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
           ICache::invalidate_range(instr_pc, *byte_count);
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
-          if (load_klass_or_mirror_patch_id) {
+          if (load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) {
             relocInfo::relocType rtype =
               (stub_id == Runtime1::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
@@ -1118,7 +1154,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
-  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
+                              (appendix.not_null() && appendix->is_scavengable()))) {
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
@@ -1179,6 +1216,24 @@ int Runtime1::move_mirror_patching(JavaThread* thread) {
   return caller_is_deopted();
 }
 
+int Runtime1::move_appendix_patching(JavaThread* thread) {
+//
+// NOTE: we are still in Java
+//
+  Thread* THREAD = thread;
+  debug_only(NoHandleMark nhm;)
+  {
+    // Enter VM mode
+    ResetNoHandleMark rnhm;
+    patch_code(thread, load_appendix_patching_id);
+  }
+  // Back in JAVA, use no oops DON'T safepoint
+
+  // Return true if calling code is deoptimized
+  return caller_is_deopted();
+}
+
 //
 // Entry point for compiled code. We want to patch a nmethod.
 // We don't do a normal VM transition here because we want to

==== changed file ====

@@ -67,6 +67,7 @@ class StubAssembler;
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
   stub(load_mirror_patching)         \
+  stub(load_appendix_patching)       \
  stub(g1_pre_barrier_slow)          \
  stub(g1_post_barrier_slow)         \
  stub(fpu2long_stub)                \
@@ -160,6 +161,7 @@ class Runtime1: public AllStatic {
   static int access_field_patching(JavaThread* thread);
   static int move_klass_patching(JavaThread* thread);
   static int move_mirror_patching(JavaThread* thread);
+  static int move_appendix_patching(JavaThread* thread);
 
   static void patch_code(JavaThread* thread, StubID stub_id);

==== changed file ====

@@ -25,4 +25,4 @@
 #include "precompiled.hpp"
 #include "c1/c1_globals.hpp"
 
-C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)

==== changed file ====

@@ -54,7 +54,7 @@
 //
 // Defines all global flags used by the client compiler.
 //
-#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct) \
+#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
 \
   /* Printing */ \
   notproduct(bool, PrintC1Statistics, false, \
@@ -333,15 +333,19 @@
           "Use CHA and exact type results at call sites when updating MDOs") \
                                                                              \
   product(bool, C1UpdateMethodData, trueInTiered,                            \
           "Update MethodData*s in Tier1-generated code")                     \
                                                                              \
   develop(bool, PrintCFGToFile, false,                                       \
           "print control flow graph to a separate file during compilation")  \
                                                                              \
+  diagnostic(bool, C1PatchInvokeDynamic, true,                               \
+          "Patch invokedynamic appendix not known at compile time")          \
+                                                                             \
+                                                                             \
 
 // Read default values for c1 globals
-C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
 #endif // SHARE_VM_C1_C1_GLOBALS_HPP
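Since C1PatchInvokeDynamic is registered as a diagnostic flag, changing it requires unlocking diagnostic options, e.g. -XX:+UnlockDiagnosticVMOptions -XX:-C1PatchInvokeDynamic to restore the old bail-out behavior for unlinked invokedynamic call sites.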

==== changed file ====

@@ -24,13 +24,92 @@
 #include "precompiled.hpp"
 #include "ci/ciArray.hpp"
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/typeArrayOop.hpp"
 
 // ciArray
 //
 // This class represents an arrayOop in the HotSpot virtual
 // machine.
+
+static BasicType fixup_element_type(BasicType bt) {
+  if (bt == T_ARRAY)    return T_OBJECT;
+  if (bt == T_BOOLEAN)  return T_BYTE;
+  return bt;
+}
+
+ciConstant ciArray::element_value_impl(BasicType elembt,
+                                       arrayOop ary,
+                                       int index) {
+  if (ary == NULL)
+    return ciConstant();
+  assert(ary->is_array(), "");
+  if (index < 0 || index >= ary->length())
+    return ciConstant();
+  ArrayKlass* ak = (ArrayKlass*) ary->klass();
+  BasicType abt = ak->element_type();
+  if (fixup_element_type(elembt) !=
+      fixup_element_type(abt))
+    return ciConstant();
+  switch (elembt) {
+  case T_ARRAY:
+  case T_OBJECT:
+    {
+      assert(ary->is_objArray(), "");
+      objArrayOop objary = (objArrayOop) ary;
+      oop elem = objary->obj_at(index);
+      ciEnv* env = CURRENT_ENV;
+      ciObject* box = env->get_object(elem);
+      return ciConstant(T_OBJECT, box);
+    }
+  }
+  assert(ary->is_typeArray(), "");
+  typeArrayOop tary = (typeArrayOop) ary;
+  jint value = 0;
+  switch (elembt) {
+  case T_LONG:    return ciConstant(tary->long_at(index));
+  case T_FLOAT:   return ciConstant(tary->float_at(index));
+  case T_DOUBLE:  return ciConstant(tary->double_at(index));
+  default:        return ciConstant();
+  case T_BYTE:    value = tary->byte_at(index);     break;
+  case T_BOOLEAN: value = tary->byte_at(index) & 1; break;
+  case T_SHORT:   value = tary->short_at(index);    break;
+  case T_CHAR:    value = tary->char_at(index);     break;
+  case T_INT:     value = tary->int_at(index);      break;
+  }
+  return ciConstant(elembt, value);
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value
+//
+// Current value of an element.
+// Returns T_ILLEGAL if there is no element at the given index.
+ciConstant ciArray::element_value(int index) {
+  BasicType elembt = element_basic_type();
+  GUARDED_VM_ENTRY(
+    return element_value_impl(elembt, get_arrayOop(), index);
+  )
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value_by_offset
+//
+// Current value of an element at the specified offset.
+// Returns T_ILLEGAL if there is no element at the given offset.
+ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
+  BasicType elembt = element_basic_type();
+  intptr_t shift  = exact_log2(type2aelembytes(elembt));
+  intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
+  intptr_t index = (element_offset - header) >> shift;
+  intptr_t offset = header + ((intptr_t)index << shift);
+  if (offset != element_offset || index != (jint)index)
+    return ciConstant();
+  return element_value((jint) index);
+}
 
 // ------------------------------------------------------------------
 // ciArray::print_impl
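The offset-to-index arithmetic in element_value_by_offset() round-trips the computed index back to an offset and rejects misaligned or out-of-range inputs. A self-contained sketch of the same check (the 16-byte header for jint elements is an assumption for illustration; the real value comes from arrayOopDesc::base_offset_in_bytes and depends on the VM build):

  #include <cstdint>
  #include <cstdio>
  #include <initializer_list>

  int main() {
    const intptr_t header = 16;  // assumed base offset of the first jint element
    const intptr_t shift  = 2;   // log2(sizeof(jint))
    for (intptr_t element_offset : {24, 26}) {
      intptr_t index  = (element_offset - header) >> shift;
      intptr_t offset = header + (index << shift);
      // Offset 24 maps exactly to index 2; offset 26 is not element-aligned,
      // so the round-trip fails and element_value_by_offset() would return
      // an empty ciConstant (T_ILLEGAL).
      printf("offset %ld -> index %ld (%s)\n", (long)element_offset, (long)index,
             offset == element_offset ? "exact" : "rejected");
    }
    return 0;
  }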

==== changed file ====

@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_CI_CIARRAY_HPP
 #define SHARE_VM_CI_CIARRAY_HPP
 
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciObject.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/objArrayOop.hpp"
@@ -45,15 +47,30 @@ protected:
   ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {}
 
-  arrayOop get_arrayOop() { return (arrayOop)get_oop(); }
+  arrayOop get_arrayOop() const { return (arrayOop)get_oop(); }
 
   const char* type_string() { return "ciArray"; }
 
   void print_impl(outputStream* st);
 
+  ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
+
 public:
   int length() { return _length; }
 
+  // Convenience routines.
+  ciArrayKlass* array_type()         { return klass()->as_array_klass(); }
+  ciType*       element_type()       { return array_type()->element_type(); }
+  BasicType     element_basic_type() { return element_type()->basic_type(); }
+
+  // Current value of an element.
+  // Returns T_ILLEGAL if there is no element at the given index.
+  ciConstant element_value(int index);
+
+  // Current value of an element at the specified offset.
+  // Returns T_ILLEGAL if there is no element at the given offset.
+  ciConstant element_value_by_offset(intptr_t element_offset);
+
   // What kind of ciObject is this?
   bool is_array()        { return true; }
   bool is_java_object()  { return true; }

==== changed file ====

@@ -41,7 +41,6 @@ private:
   union {
     jint      _int;
     jlong     _long;
-    jint      _long_half[2];
     jfloat    _float;
     jdouble   _double;
     ciObject* _object;
@@ -111,6 +110,20 @@ public:
     return _value._object;
   }
 
+  bool is_null_or_zero() const {
+    if (!is_java_primitive(basic_type())) {
+      return as_object()->is_null_object();
+    } else if (type2size[basic_type()] == 1) {
+      // treat float bits as int, to avoid comparison with -0 and NaN
+      return (_value._int == 0);
+    } else if (type2size[basic_type()] == 2) {
+      // treat double bits as long, to avoid comparison with -0 and NaN
+      return (_value._long == 0);
+    } else {
+      return false;
+    }
+  }
+
   // Debugging output
   void print();
 };
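The bit-level comparison in is_null_or_zero() is deliberate: -0.0f is numerically equal to 0.0f but has bit pattern 0x80000000, and NaN compares unequal even to itself, so comparing the raw bits as an integer gives a well-defined notion of "default value" in which only positive zero (all bits clear) counts as zero.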

==== changed file ====

@@ -1150,6 +1150,10 @@ void ciEnv::record_out_of_memory_failure() {
   record_method_not_compilable("out of memory");
 }
 
+ciInstance* ciEnv::unloaded_ciinstance() {
+  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
+}
+
 void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);

==== changed file ====

@@ -400,6 +400,7 @@ public:
   static ciInstanceKlass* unloaded_ciinstance_klass() {
     return _unloaded_ciinstance_klass;
   }
+  ciInstance* unloaded_ciinstance();
 
   ciKlass*  find_system_klass(ciSymbol* klass_name);
   // Note: To find a class from its name string, use ciSymbol::make,

==== changed file ====

@@ -189,12 +189,14 @@ void ciField::initialize_from(fieldDescriptor* fd) {
   _holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
 
   // Check to see if the field is constant.
-  if (_holder->is_initialized() && this->is_final()) {
+  bool is_final = this->is_final();
+  bool is_stable = FoldStableValues && this->is_stable();
+  if (_holder->is_initialized() && (is_final || is_stable)) {
     if (!this->is_static()) {
       // A field can be constant if it's a final static field or if
       // it's a final non-static field of a trusted class (classes in
       // java.lang.invoke and sun.invoke packages and subpackages).
-      if (trust_final_non_static_fields(_holder)) {
+      if (is_stable || trust_final_non_static_fields(_holder)) {
         _is_constant = true;
         return;
       }
@@ -227,7 +229,6 @@ void ciField::initialize_from(fieldDescriptor* fd) {
     Handle mirror = k->java_mirror();
 
-    _is_constant = true;
     switch(type()->basic_type()) {
     case T_BYTE:
       _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
@@ -273,6 +274,12 @@ void ciField::initialize_from(fieldDescriptor* fd) {
         }
       }
     }
+    if (is_stable && _constant_value.is_null_or_zero()) {
+      // It is not a constant after all; treat it as uninitialized.
+      _is_constant = false;
+    } else {
+      _is_constant = true;
+    }
   } else {
     _is_constant = false;
   }
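The net effect for @Stable fields: a field still holding its default value (null, or all-zero bits for a primitive) is treated as not yet initialized and left non-constant, while a field observed with any other value is folded as a compile-time constant. This is why the is_null_or_zero() check added to ciConstant runs before _is_constant is set.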
@@ -373,8 +380,11 @@ void ciField::print() {
   tty->print(" signature=");
   _signature->print_symbol();
   tty->print(" offset=%d type=", _offset);
-  if (_type != NULL) _type->print_name();
-  else               tty->print("(reference)");
+  if (_type != NULL)
+    _type->print_name();
+  else
+    tty->print("(reference)");
+  tty->print(" flags=%04x", flags().as_int());
   tty->print(" is_constant=%s", bool_to_str(_is_constant));
   if (_is_constant && is_static()) {
     tty->print(" constant_value=");

==== changed file ====

@@ -139,7 +139,10 @@ public:
   // non-constant fields.  These are java.lang.System.in
   // and java.lang.System.out.  Abomination.
   //
-  // Note: the check for case 4 is not yet implemented.
+  // A field is also considered constant if it is marked @Stable
+  // and is non-null (or non-zero, if a primitive).
+  // For non-static fields, the null/zero check must be
+  // arranged by the user, as constant_value().is_null_or_zero().
   bool is_constant() { return _is_constant; }
 
   // Get the constant value of this field.
@@ -173,6 +176,7 @@ public:
   bool is_protected () { return flags().is_protected(); }
   bool is_static    () { return flags().is_static(); }
   bool is_final     () { return flags().is_final(); }
+  bool is_stable    () { return flags().is_stable(); }
   bool is_volatile  () { return flags().is_volatile(); }
   bool is_transient () { return flags().is_transient(); }

==== changed file ====

@@ -59,6 +59,7 @@ public:
   bool is_interface   () const { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
   bool is_abstract    () const { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
   bool is_strict      () const { return (_flags & JVM_ACC_STRICT      ) != 0; }
+  bool is_stable      () const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
 
   // Conversion
   jint as_int() { return _flags; }

==== changed file ====

@@ -60,10 +60,10 @@ ciType* ciInstance::java_mirror_type() {
 //
 // Constant value of a field.
 ciConstant ciInstance::field_value(ciField* field) {
-  assert(is_loaded() &&
-         field->holder()->is_loaded() &&
-         klass()->is_subclass_of(field->holder()),
-         "invalid access");
+  assert(is_loaded(), "invalid access - must be loaded");
+  assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
+  assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
+
   VM_ENTRY_MARK;
   ciConstant result;
   Handle obj = get_oop();
@@ -127,6 +127,8 @@ ciConstant ciInstance::field_value(ciField* field) {
 ciConstant ciInstance::field_value_by_offset(int field_offset) {
   ciInstanceKlass* ik = klass()->as_instance_klass();
   ciField* field = ik->get_field_by_offset(field_offset, false);
+  if (field == NULL)
+    return ciConstant();  // T_ILLEGAL
   return field_value(field);
 }

==== changed file ====

@@ -177,6 +177,10 @@ class ciMethod : public ciMetadata {
     address bcp = code() + bci;
     return Bytecodes::java_code_at(NULL, bcp);
   }
+  Bytecodes::Code raw_code_at_bci(int bci) {
+    address bcp = code() + bci;
+    return Bytecodes::code_at(NULL, bcp);
+  }
   BCEscapeAnalyzer  *get_bcea();
   ciMethodBlocks    *get_method_blocks();

==== changed file ====

@@ -563,7 +563,10 @@ ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signatu
   return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
 }
 
+ciInstance* ciObjectFactory::get_unloaded_object_constant() {
+  if (ciEnv::_Object_klass == NULL)  return NULL;
+  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
+}
 
 //------------------------------------------------------------------
 // ciObjectFactory::get_empty_methodData

==== changed file ====

@@ -131,6 +131,8 @@ public:
   ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
 
+  ciInstance* get_unloaded_object_constant();
+
   // Get the ciMethodData representing the methodData for a method
   // with none.
   ciMethodData* get_empty_methodData();

View file

@@ -39,5 +39,10 @@
 jchar ciTypeArray::char_at(int index) {
   VM_ENTRY_MARK;
   assert(index >= 0 && index < length(), "out of range");
-  return get_typeArrayOop()->char_at(index);
+  jchar c = get_typeArrayOop()->char_at(index);
+#ifdef ASSERT
+  jchar d = element_value(index).as_char();
+  assert(c == d, "");
+#endif //ASSERT
+  return c;
 }

View file

@@ -1787,6 +1787,10 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
     return _method_LambdaForm_Hidden;
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
+    if (_location != _in_field)   break;  // only allow for fields
+    if (!privileged)              break;  // only allow in privileged code
+    return _field_Stable;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
     if (_location != _in_field && _location != _in_class)  break;  // only allow for fields and classes
     if (!EnableContended || (RestrictContended && !privileged))  break;  // honor privileges

@@ -1799,6 +1803,8 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
 void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
   if (is_contended())
     f->set_contended_group(contended_group());
+  if (is_stable())
+    f->set_stable(true);
 }

 ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {

View file

@@ -125,6 +125,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
     _method_LambdaForm_Compiled,
     _method_LambdaForm_Hidden,
     _sun_misc_Contended,
+    _field_Stable,
     _annotation_LIMIT
   };
   const Location _location;

@@ -143,14 +144,23 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
     assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
     _annotations_present |= nth_bit((int)id);
   }
+
+  void remove_annotation(ID id) {
+    assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+    _annotations_present &= ~nth_bit((int)id);
+  }
+
   // Report if the annotation is present.
-  bool has_any_annotations() { return _annotations_present != 0; }
-  bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
+  bool has_any_annotations() const { return _annotations_present != 0; }
+  bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }

   void set_contended_group(u2 group) { _contended_group = group; }
-  u2 contended_group() { return _contended_group; }
+  u2 contended_group() const { return _contended_group; }

-  bool is_contended() { return has_annotation(_sun_misc_Contended); }
+  bool is_contended() const { return has_annotation(_sun_misc_Contended); }
+
+  void set_stable(bool stable) { set_annotation(_field_Stable); }
+  bool is_stable() const { return has_annotation(_field_Stable); }
 };

 // This class also doubles as a holder for metadata cleanup.

View file

@@ -270,6 +270,7 @@
   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
+  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature,   "Ljava/lang/invoke/LambdaForm$Hidden;")   \
   template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \

View file

@@ -93,18 +93,21 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
 #endif

 bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c1();
 }

 bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
-  if (is_native_method()) return false;
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_c2();
 }

 bool nmethod::is_compiled_by_shark() const {
-  if (is_native_method()) return false;
-  assert(compiler() != NULL, "must be");
+  if (compiler() == NULL) {
+    return false;
+  }
   return compiler()->is_shark();
 }

@@ -1401,6 +1404,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
     // nmethods aren't scanned for GC.
     _oops_are_stale = true;
 #endif
+    // the Method may be reclaimed by class unloading now that the
+    // nmethod is in zombie state
+    set_method(NULL);
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }

View file

@@ -1718,7 +1718,7 @@ static void codecache_print(bool detailed)
     CodeCache::print_summary(&s, detailed);
   }
   ttyLocker ttyl;
-  tty->print_cr(s.as_string());
+  tty->print(s.as_string());
 }

 // ------------------------------------------------------------------

View file

@@ -2493,11 +2493,11 @@ void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {

 void G1CollectedHeap::register_concurrent_cycle_end() {
   if (_concurrent_cycle_started) {
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
     if (_cm->has_aborted()) {
       _gc_tracer_cm->report_concurrent_mode_failure();
     }
-    _gc_timer_cm->register_gc_end(os::elapsed_counter());
     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());

     _concurrent_cycle_started = false;

View file

@@ -168,7 +168,15 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
   // so it's done as soon as possible.
-  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
+  // It would have been natural to pass initial_heap_byte_size() and
+  // max_heap_byte_size() to setup_heap_region_size() but those have
+  // not been set up at this point since they should be aligned with
+  // the region size. So, there is a circular dependency here. We base
+  // the region size on the heap size, but the heap size should be
+  // aligned with the region size. To get around this we use the
+  // unaligned values for the heap.
+  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();

   G1ErgoVerbose::initialize();

View file

@@ -149,18 +149,11 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
 // many regions in the heap (based on the min heap size).
 #define TARGET_REGION_NUMBER          2048

-void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
-  // region_size in bytes
+void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
-    // We base the automatic calculation on the min heap size. This
-    // can be problematic if the spread between min and max is quite
-    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
-    // the max size, the region size might be way too large for the
-    // min size. Either way, some users might have to set the region
-    // size manually for some -Xms / -Xmx combos.
-    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
                        (uintx) MIN_REGION_SIZE);
   }
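For illustration, the averaging addresses exactly the -Xms128m -Xmx32g case the removed comment worried about. A minimal sketch of the new arithmetic (MIN_REGION_SIZE of 1M and the later power-of-two rounding are assumptions based on the rest of this file, not shown in the hunk):

  size_t initial = 128*M;
  size_t max     = (size_t)32*G;
  size_t average = (initial + max) / 2;           // ~16 GB
  uintx  region  = MAX2(average / 2048,           // TARGET_REGION_NUMBER
                        (uintx)(1*M));            // ~8 MB regions, vs 1 MB when based on -Xms alone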

View file

@@ -361,7 +361,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // CardsPerRegion). All those fields are considered constant
   // throughout the JVM's execution, therefore they should only be set
   // up once during initialization time.
-  static void setup_heap_region_size(uintx min_heap_size);
+  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);

   enum ClaimValues {
     InitialClaimValue          = 0,

View file

@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
 #if INCLUDE_ALL_GCS

@@ -54,11 +55,12 @@ void GCTracer::send_garbage_collection_event() const {
 }

 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
-  EventGCReferenceStatistics e;
+  EventGCReferenceStatistics e(UNTIMED);
   if (e.should_commit()) {
       e.set_gcId(_shared_gc_info.id());
       e.set_type((u1)type);
       e.set_count(count);
+      e.set_endtime(os::elapsed_counter());
       e.commit();
   }
 }

@@ -105,20 +107,22 @@ static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
 }

 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
-  EventPromotionFailed e;
+  EventPromotionFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
+    e.set_endtime(os::elapsed_counter());
    e.commit();
   }
 }

 // Common to CMS and G1
 void OldGCTracer::send_concurrent_mode_failure_event() {
-  EventConcurrentModeFailure e;
+  EventConcurrentModeFailure e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }

@@ -136,7 +140,7 @@ void G1NewTracer::send_g1_young_gc_event() {
 }

 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
-  EventEvacuationInfo e;
+  EventEvacuationInfo e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_cSetRegions(info->collectionset_regions());

@@ -147,15 +151,17 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
     e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
     e.set_bytesCopied(info->bytes_copied());
     e.set_regionsFreed(info->regions_freed());
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }

 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
-  EventEvacuationFailed e;
+  EventEvacuationFailed e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_data(to_trace_struct(ef_info));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }

@@ -189,12 +195,13 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();

-    EventGCHeapSummary e;
+    EventGCHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }

@@ -209,7 +216,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
     const SpaceSummary& from_space = ps_heap_summary->from();
     const SpaceSummary& to_space = ps_heap_summary->to();

-    EventPSHeapSummary e;
+    EventPSHeapSummary e(UNTIMED);
     if (e.should_commit()) {
       e.set_gcId(_id);
       e.set_when((u1)_when);

@@ -220,6 +227,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
       e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
       e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
       e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.set_endtime(os::elapsed_counter());
       e.commit();
     }
   }

@@ -241,13 +249,14 @@ static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
 }

 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
-  EventMetaspaceSummary e;
+  EventMetaspaceSummary e(UNTIMED);
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
     e.set_when((u1) when);
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
     e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
     e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
+    e.set_endtime(os::elapsed_counter());
     e.commit();
   }
 }

@@ -282,8 +291,6 @@ class PhaseSender : public PhaseVisitor {
       default: /* Ignore sending this phase */ break;
     }
   }
-
-#undef send_phase
 };

 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
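All of these sites follow one pattern, sketched below (EventExample is a stand-in name, not a real event type): constructing the event UNTIMED suppresses the automatic timestamping, and the end time is then stamped explicitly from os::elapsed_counter(), so every event belonging to one GC carries a consistent clock.

  EventExample e(UNTIMED);                 // hypothetical event type
  if (e.should_commit()) {
    // ... set payload fields ...
    e.set_endtime(os::elapsed_counter());  // explicit, consistent end timestamp
    e.commit();
  }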

View file

@@ -240,6 +240,14 @@ class FieldInfo VALUE_OBJ_CLASS_SPEC {
     return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
   }

+  bool is_stable() const {
+    return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
+  }
+  void set_stable(bool z) {
+    if (z) _shorts[access_flags_offset] |=  JVM_ACC_FIELD_STABLE;
+    else   _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
+  }
+
   Symbol* lookup_symbol(int symbol_index) const {
     assert(is_internal(), "only internal fields");
     return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
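A small usage sketch (hypothetical driver code, not part of the patch): the setter toggles JVM_ACC_FIELD_STABLE directly in the packed _shorts array, and the getter reads it back through access_flags().

  FieldInfo* info = /* obtained from an InstanceKlass fields array */ NULL;
  if (info != NULL) {
    info->set_stable(true);
    assert(info->is_stable(), "stable bit should round-trip");
  }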

View file

@@ -720,11 +720,22 @@ void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report,
   }
 }

+bool Method::is_always_compilable() const {
+  // Generated adapters must be compiled
+  if (is_method_handle_intrinsic() && is_synthetic()) {
+    assert(!is_not_c1_compilable(), "sanity check");
+    assert(!is_not_c2_compilable(), "sanity check");
+    return true;
+  }
+
+  return false;
+}
+
 bool Method::is_not_compilable(int comp_level) const {
   if (number_of_breakpoints() > 0)
     return true;
-  if (is_method_handle_intrinsic())
-    return !is_synthetic();  // the generated adapters must be compiled
+  if (is_always_compilable())
+    return false;
   if (comp_level == CompLevel_any)
     return is_not_c1_compilable() || is_not_c2_compilable();
   if (is_c1_compile(comp_level))

@@ -736,6 +747,10 @@ bool Method::is_not_compilable(int comp_level) const {
 // call this when compiler finds that this method is not compilable
 void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
+  if (is_always_compilable()) {
+    // Don't mark a method which should be always compilable
+    return;
+  }
   print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
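Taken together, the two hunks make the property symmetric. A sketch of the resulting behavior ("m" is a hypothetical synthetic method handle intrinsic, not code from the patch):

  Method* m = /* a method for which is_method_handle_intrinsic() && is_synthetic() */ NULL;
  if (m != NULL) {
    m->set_not_compilable(CompLevel_all, false, "test");    // now a no-op for such methods
    assert(!m->is_not_compilable(CompLevel_any), "adapter stays compilable");
  }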

View file

@@ -796,6 +796,7 @@ class Method : public Metadata {
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
+  bool is_always_compilable() const;

  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

View file

@@ -112,9 +112,9 @@ uint Block::compute_loop_alignment() {
 // exceeds OptoLoopAlignment.
 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                     PhaseRegAlloc* ra) {
-  uint last_inst = _nodes.size();
+  uint last_inst = number_of_nodes();
   for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
-    uint inst_size = _nodes[j]->size(ra);
+    uint inst_size = get_node(j)->size(ra);
     if( inst_size > 0 ) {
       inst_cnt--;
       uint sz = sum_size + inst_size;

@@ -131,8 +131,8 @@ uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
 }

 uint Block::find_node( const Node *n ) const {
-  for( uint i = 0; i < _nodes.size(); i++ ) {
-    if( _nodes[i] == n )
+  for( uint i = 0; i < number_of_nodes(); i++ ) {
+    if( get_node(i) == n )
       return i;
   }
   ShouldNotReachHere();

@@ -141,7 +141,7 @@ uint Block::find_node( const Node *n ) const {

 // Find and remove n from block list
 void Block::find_remove( const Node *n ) {
-  _nodes.remove(find_node(n));
+  remove_node(find_node(n));
 }

 // Return empty status of a block.  Empty blocks contain only the head, other

@@ -154,10 +154,10 @@ int Block::is_Empty() const {
   }

   int success_result = completely_empty;
-  int end_idx = _nodes.size()-1;
+  int end_idx = number_of_nodes() - 1;

   // Check for ending goto
-  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
+  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
     success_result = empty_with_goto;
     end_idx--;
   }

@@ -170,7 +170,7 @@ int Block::is_Empty() const {
   // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes
   // turn directly into code, because only MachNodes have non-trivial
   // emit() functions.
-  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
+  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
     end_idx--;
   }

@@ -209,15 +209,15 @@ bool Block::has_uncommon_code() const {

 // True if block is low enough frequency or guarded by a test which
 // mostly does not go here.
-bool Block::is_uncommon(PhaseCFG* cfg) const {
+bool PhaseCFG::is_uncommon(const Block* block) {
   // Initial blocks must never be moved, so are never uncommon.
-  if (head()->is_Root() || head()->is_Start())  return false;
+  if (block->head()->is_Root() || block->head()->is_Start())  return false;

   // Check for way-low freq
-  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
+  if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;

   // Look for code shape indicating uncommon_trap or slow path
-  if (has_uncommon_code()) return true;
+  if (block->has_uncommon_code()) return true;

   const float epsilon = 0.05f;
   const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);

@@ -225,8 +225,8 @@ bool Block::is_uncommon(PhaseCFG* cfg) const {
   uint freq_preds = 0;
   uint uncommon_for_freq_preds = 0;

-  for( uint i=1; i<num_preds(); i++ ) {
-    Block* guard = cfg->get_block_for_node(pred(i));
+  for( uint i=1; i< block->num_preds(); i++ ) {
+    Block* guard = get_block_for_node(block->pred(i));
     // Check to see if this block follows its guard 1 time out of 10000
     // or less.
     //

@@ -244,14 +244,14 @@ bool Block::is_uncommon(PhaseCFG* cfg) const {
       uncommon_preds++;
     } else {
       freq_preds++;
-      if( _freq < guard->_freq * guard_factor ) {
+      if(block->_freq < guard->_freq * guard_factor ) {
         uncommon_for_freq_preds++;
       }
     }
   }
-  if( num_preds() > 1 &&
+  if( block->num_preds() > 1 &&
       // The block is uncommon if all preds are uncommon or
-      (uncommon_preds == (num_preds()-1) ||
+      (uncommon_preds == (block->num_preds()-1) ||
       // it is uncommon for all frequent preds.
        uncommon_for_freq_preds == freq_preds) ) {
     return true;

@@ -344,8 +344,8 @@ void Block::dump() const {

 void Block::dump(const PhaseCFG* cfg) const {
   dump_head(cfg);
-  for (uint i=0; i< _nodes.size(); i++) {
-    _nodes[i]->dump();
+  for (uint i=0; i< number_of_nodes(); i++) {
+    get_node(i)->dump();
   }
   tty->print("\n");
 }

@@ -434,7 +434,7 @@ uint PhaseCFG::build_cfg() {
       map_node_to_block(p, bb);
       map_node_to_block(x, bb);
       if( x != p ) {                // Only for root is x == p
-        bb->_nodes.push((Node*)x);
+        bb->push_node((Node*)x);
       }
       // Now handle predecessors
       ++sum;                        // Count 1 for self block

@@ -469,11 +469,11 @@ uint PhaseCFG::build_cfg() {
       assert( x != proj, "" );
       // Map basic block of projection
       map_node_to_block(proj, pb);
-      pb->_nodes.push(proj);
+      pb->push_node(proj);
     }
     // Insert self as a child of my predecessor block
     pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
-    assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
+    assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
             "too many control users, not a CFG?" );
   }
 }

@@ -495,7 +495,7 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
   // surrounding blocks.
   float freq = in->_freq * in->succ_prob(succ_no);
   // get ProjNode corresponding to the succ_no'th successor of the in block
-  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
+  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
   // create region for basic block
   RegionNode* region = new (C) RegionNode(2);
   region->init_req(1, proj);

@@ -507,7 +507,7 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, region);
   // add it to the basic block
-  block->_nodes.push(gto);
+  block->push_node(gto);
   map_node_to_block(gto, block);
   C->regalloc()->set_bad(gto->_idx);
   // hook up successor block

@@ -527,9 +527,9 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
 // Does this block end in a multiway branch that cannot have the default case
 // flipped for another case?
 static bool no_flip_branch( Block *b ) {
-  int branch_idx = b->_nodes.size() - b->_num_succs-1;
+  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
   if( branch_idx < 1 ) return false;
-  Node *bra = b->_nodes[branch_idx];
+  Node *bra = b->get_node(branch_idx);
   if( bra->is_Catch() )
     return true;
   if( bra->is_Mach() ) {

@@ -550,16 +550,16 @@ static bool no_flip_branch( Block *b ) {
 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
   // Find true target
   int end_idx = b->end_idx();
-  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
+  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
   Block *succ = b->_succs[idx];
   Node* gto = _goto->clone(); // get a new goto node
   gto->set_req(0, b->head());
-  Node *bp = b->_nodes[end_idx];
-  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
+  Node *bp = b->get_node(end_idx);
+  b->map_node(gto, end_idx); // Slam over NeverBranch
   map_node_to_block(gto, b);
   C->regalloc()->set_bad(gto->_idx);
-  b->_nodes.pop();              // Yank projections
-  b->_nodes.pop();              // Yank projections
+  b->pop_node();              // Yank projections
+  b->pop_node();              // Yank projections
   b->_succs.map(0,succ);        // Map only successor
   b->_num_succs = 1;
   // remap successor's predecessors if necessary

@@ -575,8 +575,8 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
     // Scan through block, yanking dead path from
     // all regions and phis.
     dead->head()->del_req(j);
-    for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
-      dead->_nodes[k]->del_req(j);
+    for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
+      dead->get_node(k)->del_req(j);
 }

 // Helper function to move block bx to the slot following b_index. Return

@@ -620,7 +620,7 @@ void PhaseCFG::move_to_end(Block *b, uint i) {
   if (e != Block::not_empty) {
     if (e == Block::empty_with_goto) {
       // Remove the goto, but leave the block.
-      b->_nodes.pop();
+      b->pop_node();
     }
     // Mark this block as a connector block, which will cause it to be
     // ignored in certain functions such as non_connector_successor().

@@ -663,13 +663,13 @@ void PhaseCFG::remove_empty_blocks() {
     // to give a fake exit path to infinite loops.  At this late stage they
     // need to turn into Goto's so that when you enter the infinite loop you
     // indeed hang.
-    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
+    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
      convert_NeverBranch_to_Goto(block);
     }

     // Look for uncommon blocks and move to end.
     if (!C->do_freq_based_layout()) {
-      if (block->is_uncommon(this)) {
+      if (is_uncommon(block)) {
         move_to_end(block, i);
         last--; // No longer check for being uncommon!
         if (no_flip_branch(block)) { // Fall-thru case must follow?

@@ -720,9 +720,9 @@ void PhaseCFG::fixup_flow() {
     // exchange the true and false targets.
     if (no_flip_branch(block)) {
       // Find fall through case - if must fall into its target
-      int branch_idx = block->_nodes.size() - block->_num_succs;
+      int branch_idx = block->number_of_nodes() - block->_num_succs;
       for (uint j2 = 0; j2 < block->_num_succs; j2++) {
-        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
+        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
         if (p->_con == 0) {
           // successor j2 is fall through case
           if (block->non_connector_successor(j2) != bnext) {

@@ -743,14 +743,14 @@ void PhaseCFG::fixup_flow() {

         // Remove all CatchProjs
         for (uint j = 0; j < block->_num_succs; j++) {
-          block->_nodes.pop();
+          block->pop_node();
         }

       } else if (block->_num_succs == 1) {
         // Block ends in a Goto?
         if (bnext == bs0) {
           // We fall into next block; remove the Goto
-          block->_nodes.pop();
+          block->pop_node();
         }

       } else if(block->_num_succs == 2) { // Block ends in a If?

@@ -759,9 +759,9 @@ void PhaseCFG::fixup_flow() {
         // be projections (in any order), the 3rd last node must be
         // the IfNode (we have excluded other 2-way exits such as
         // CatchNodes already).
-        MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
-        ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
-        ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
+        MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
+        ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
+        ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();

         // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
         assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");

@@ -833,8 +833,8 @@ void PhaseCFG::fixup_flow() {
           iff->as_MachIf()->negate();
         }

-        block->_nodes.pop();          // Remove IfFalse & IfTrue projections
-        block->_nodes.pop();
+        block->pop_node();          // Remove IfFalse & IfTrue projections
+        block->pop_node();

       } else {
         // Multi-exit block, e.g. a switch statement

@@ -895,13 +895,13 @@ void PhaseCFG::verify() const {
   // Verify sane CFG
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
     uint j;
     for (j = 0; j < cnt; j++)  {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       assert(get_block_for_node(n) == block, "");
       if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
-        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
+        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
       }
       for (uint k = 0; k < n->req(); k++) {
         Node *def = n->in(k);

@@ -930,14 +930,14 @@ void PhaseCFG::verify() const {
     }

     j = block->end_idx();
-    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
+    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
     assert(bp, "last instruction must be a block proj");
-    assert(bp == block->_nodes[j], "wrong number of successors for this block");
+    assert(bp == block->get_node(j), "wrong number of successors for this block");
     if (bp->is_Catch()) {
-      while (block->_nodes[--j]->is_MachProj()) {
+      while (block->get_node(--j)->is_MachProj()) {
         ;
       }
-      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
+      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
     } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
       assert(block->_num_succs == 2, "Conditional branch must have two targets");
     }

@@ -1440,9 +1440,9 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
       Block *bnext = next(b);
       Block *bs0 = b->non_connector_successor(0);

-      MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
-      ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
-      ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
+      MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
+      ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
+      ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();

       if (bnext == bs0) {
         // Fall-thru case in succs[0], should be in succs[1]

@@ -1454,8 +1454,8 @@ void Trace::fixup_blocks(PhaseCFG &cfg) {
         b->_succs.map( 1, tbs0 );

         // Flip projections to match targets
-        b->_nodes.map(b->_nodes.size()-2, proj1);
-        b->_nodes.map(b->_nodes.size()-1, proj0);
+        b->map_node(proj1, b->number_of_nodes() - 2);
+        b->map_node(proj0, b->number_of_nodes() - 1);
       }
     }
   }

View file

@@ -105,15 +105,53 @@ class CFGElement : public ResourceObj {
 // any optimization pass.  They are created late in the game.
 class Block : public CFGElement {
   friend class VMStructs;
- public:

+private:
   // Nodes in this block, in order
   Node_List _nodes;

+public:
+
+  // Get the node at index 'at_index', if 'at_index' is out of bounds return NULL
+  Node* get_node(uint at_index) const {
+    return _nodes[at_index];
+  }
+
+  // Get the number of nodes in this block
+  uint number_of_nodes() const {
+    return _nodes.size();
+  }
+
+  // Map a node 'node' to index 'to_index' in the block, if the index is out of bounds the size of the node list is increased
+  void map_node(Node* node, uint to_index) {
+    _nodes.map(to_index, node);
+  }
+
+  // Insert a node 'node' at index 'at_index', moving all nodes that are on a higher index one step, if 'at_index' is out of bounds we crash
+  void insert_node(Node* node, uint at_index) {
+    _nodes.insert(at_index, node);
+  }
+
+  // Remove a node at index 'at_index'
+  void remove_node(uint at_index) {
+    _nodes.remove(at_index);
+  }
+
+  // Push a node 'node' onto the node list
+  void push_node(Node* node) {
+    _nodes.push(node);
+  }
+
+  // Pop the last node off the node list
+  Node* pop_node() {
+    return _nodes.pop();
+  }
+
   // Basic blocks have a Node which defines Control for all Nodes pinned in
   // this block.  This Node is a RegionNode.  Exception-causing Nodes
   // (division, subroutines) and Phi functions are always pinned.  Later,
   // every Node will get pinned to some block.
-  Node *head() const { return _nodes[0]; }
+  Node *head() const { return get_node(0); }

   // CAUTION: num_preds() is ONE based, so that predecessor numbers match
   // input edges to Regions and Phis.
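The net effect is that _nodes is no longer reachable from outside the class; external iteration now reads as in this sketch (illustrative only, "b" being any Block*):

  for (uint i = 0; i < b->number_of_nodes(); i++) {  // was b->_nodes.size()
    Node* n = b->get_node(i);                        // was b->_nodes[i]
    n->dump();
  }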
@@ -274,29 +312,12 @@ class Block : public CFGElement {

   // Add an instruction to an existing block.  It must go after the head
   // instruction and before the end instruction.
-  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
+  void add_inst( Node *n ) { insert_node(n, end_idx()); }
   // Find node in block
   uint find_node( const Node *n ) const;
   // Find and remove n from block list
   void find_remove( const Node *n );

-  // helper function that adds caller save registers to MachProjNode
-  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
-  // Schedule a call next in the block
-  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
-
-  // Perform basic-block local scheduling
-  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
-  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
-  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
-  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
-  // Cleanup if any code lands between a Call and his Catch
-  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
-
-  // Detect implicit-null-check opportunities.  Basically, find NULL checks
-  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
-  // I can generate a memory op if there is not one nearby.
-  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
-
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
   int is_Empty() const;

@@ -328,10 +349,6 @@ class Block : public CFGElement {
   // Examine block's code shape to predict if it is not commonly executed.
   bool has_uncommon_code() const;

-  // Use frequency calculations and code shape to predict if the block
-  // is uncommon.
-  bool is_uncommon(PhaseCFG* cfg) const;
-
 #ifndef PRODUCT
   // Debugging print of basic block
   void dump_bidx(const Block* orig, outputStream* st = tty) const;

@@ -414,6 +431,27 @@ class PhaseCFG : public Phase {
   // to late. Helper for schedule_late.
   Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

+  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
+  void set_next_call(Block* block, Node* n, VectorSet& next_call);
+  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
+
+  // Perform basic-block local scheduling
+  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
+
+  // Schedule a call next in the block
+  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
+
+  // Cleanup if any code lands between a Call and his Catch
+  void call_catch_cleanup(Block* block);
+
+  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
+  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
+
+  // Detect implicit-null-check opportunities.  Basically, find NULL checks
+  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
+  // I can generate a memory op if there is not one nearby.
+  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
+
   // Perform a Depth First Search (DFS).
   // Setup 'vertex' as DFS to vertex mapping.
   // Setup 'semi' as vertex to DFS mapping.

@@ -530,6 +568,10 @@ class PhaseCFG : public Phase {
     return (_node_to_block_mapping.lookup(node->_idx) != NULL);
   }

+  // Use frequency calculations and code shape to predict if the block
+  // is uncommon.
+  bool is_uncommon(const Block* block);
+
 #ifdef ASSERT
   Unique_Node_List _raw_oops;
 #endif

@@ -550,7 +592,7 @@ class PhaseCFG : public Phase {

   // Insert a node into a block at index and map the node to the block
   void insert(Block *b, uint idx, Node *n) {
-    b->_nodes.insert( idx, n );
+    b->insert_node(n , idx);
     map_node_to_block(n, b);
   }

View file

@@ -121,8 +121,8 @@ struct OopFlow : public ResourceObj {
 // Given reaching-defs for this block start, compute it for this block end
 void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {

-  for( uint i=0; i<_b->_nodes.size(); i++ ) {
-    Node *n = _b->_nodes[i];
+  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
+    Node *n = _b->get_node(i);

     if( n->jvms() ) {           // Build an OopMap here?
       JVMState *jvms = n->jvms();

@@ -447,8 +447,8 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
     }

     // Now walk tmp_live up the block backwards, computing live
-    for( int k=b->_nodes.size()-1; k>=0; k-- ) {
-      Node *n = b->_nodes[k];
+    for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
+      Node *n = b->get_node(k);
       // KILL def'd bits
       int first = regalloc->get_reg_first(n);
       int second = regalloc->get_reg_second(n);

@@ -544,12 +544,12 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
     for (i = 1; i < cfg->number_of_blocks(); i++) {
       Block* block = cfg->get_block(i);
       uint j;
-      for (j = 1; j < block->_nodes.size(); j++) {
-        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
+      for (j = 1; j < block->number_of_nodes(); j++) {
+        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
           break;
         }
       }
-      if (j < block->_nodes.size()) {
+      if (j < block->number_of_nodes()) {
         break;
       }
     }

View file

@@ -421,7 +421,7 @@
   product(bool, UseDivMod, true,                                            \
           "Use combined DivMod instruction if available")                   \
                                                                             \
-  product(intx, MinJumpTableSize, 18,                                       \
+  product_pd(intx, MinJumpTableSize,                                        \
           "Minimum number of targets in a generated jump table")            \
                                                                             \
   product(intx, MaxJumpTableSize, 65000,                                    \

@@ -448,6 +448,9 @@
   product(bool, EliminateAutoBox, true,                                     \
           "Control optimizations for autobox elimination")                  \
                                                                             \
+  experimental(bool, UseImplicitStableValues, false,                        \
+          "Mark well-known stable fields as such (e.g. String.value)")      \
+                                                                            \
   product(intx, AutoBoxCacheMax, 128,                                       \
           "Sets max value cached by the java.lang.Integer autobox cache")   \
                                                                             \
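Note that UseImplicitStableValues is declared experimental, so it stays locked behind the experimental-options gate; presumably a run would enable it with -XX:+UnlockExperimentalVMOptions -XX:+UseImplicitStableValues.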

View file

@@ -458,7 +458,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st)
       st->print("={");
       uint nf = spobj->n_fields();
       if (nf > 0) {
-        uint first_ind = spobj->first_index();
+        uint first_ind = spobj->first_index(mcall->jvms());
         Node* fld_node = mcall->in(first_ind);
         ciField* cifield;
         if (iklass != NULL) {

@@ -1063,7 +1063,6 @@ void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
   int scloff = jvms->scloff();
   int endoff = jvms->endoff();
   assert(endoff == (int)req(), "no other states or debug info after me");
-  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   Node* top = Compile::current()->top();
   for (uint i = 0; i < grow_by; i++) {
     ins_req(monoff, top);

@@ -1079,32 +1078,31 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
   const int MonitorEdges = 2;
   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   assert(req() == jvms()->endoff(), "correct sizing");
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   int nextmon = jvms()->scloff();
   if (GenerateSynchronizationCode) {
-    add_req(lock->box_node());
-    add_req(lock->obj_node());
+    ins_req(nextmon,   lock->box_node());
+    ins_req(nextmon+1, lock->obj_node());
   } else {
     Node* top = Compile::current()->top();
-    add_req(top);
-    add_req(top);
+    ins_req(nextmon, top);
+    ins_req(nextmon, top);
   }
-  jvms()->set_scloff(nextmon+MonitorEdges);
+  jvms()->set_scloff(nextmon + MonitorEdges);
   jvms()->set_endoff(req());
 }

 void SafePointNode::pop_monitor() {
   // Delete last monitor from debug info
-  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   debug_only(int num_before_pop = jvms()->nof_monitors());
-  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
+  const int MonitorEdges = 2;
+  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   int scloff = jvms()->scloff();
   int endoff = jvms()->endoff();
   int new_scloff = scloff - MonitorEdges;
   int new_endoff = endoff - MonitorEdges;
   jvms()->set_scloff(new_scloff);
   jvms()->set_endoff(new_endoff);
-  while (scloff > new_scloff)  del_req(--scloff);
+  while (scloff > new_scloff)  del_req_ordered(--scloff);
   assert(jvms()->nof_monitors() == num_before_pop-1, "");
 }

@@ -1169,13 +1167,12 @@ uint SafePointScalarObjectNode::match_edge(uint idx) const {
 }

 SafePointScalarObjectNode*
-SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
+SafePointScalarObjectNode::clone(Dict* sosn_map) const {
   void* cached = (*sosn_map)[(void*)this];
   if (cached != NULL) {
     return (SafePointScalarObjectNode*)cached;
   }
   SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
-  res->_first_index += jvms_adj;
   sosn_map->Insert((void*)this, (void*)res);
   return res;
 }

View file

@@ -449,14 +449,17 @@ public:
 // at a safepoint.

 class SafePointScalarObjectNode: public TypeNode {
-  uint _first_index; // First input edge index of a SafePoint node where
+  uint _first_index; // First input edge relative index of a SafePoint node where
                      // states of the scalarized object fields are collected.
+                     // It is relative to the last (youngest) jvms->_scloff.
   uint _n_fields;    // Number of non-static fields of the scalarized object.
   DEBUG_ONLY(AllocateNode* _alloc;)

   virtual uint hash() const ; // { return NO_HASH; }
   virtual uint cmp( const Node &n ) const;

+  uint first_index() const { return _first_index; }
+
 public:
   SafePointScalarObjectNode(const TypeOopPtr* tp,
 #ifdef ASSERT

@@ -469,7 +472,10 @@ public:
   virtual const RegMask &out_RegMask() const;
   virtual uint match_edge(uint idx) const;

-  uint first_index() const { return _first_index; }
+  uint first_index(JVMState* jvms) const {
+    assert(jvms != NULL, "missed JVMS");
+    return jvms->scloff() + _first_index;
+  }
   uint n_fields()    const { return _n_fields; }

 #ifdef ASSERT

@@ -485,7 +491,7 @@ public:
   // corresponds appropriately to "this" in "new_call".  Assumes that
   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
-  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
+  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
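A sketch of the caller-side change (names as used in the callnode.cpp hunk above): the stored index is now re-based against the supplied JVMState, so cloning into a deeper JVMS no longer needs the old jvms_adj fix-up.

  uint first_ind = spobj->first_index(mcall->jvms());  // == jvms->scloff() + _first_index
  Node* fld_node = mcall->in(first_ind);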

View file

@@ -301,7 +301,7 @@ int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint&
     // Copy kill projections after the cloned node
     Node* kills = proj->clone();
     kills->set_req(0, copy);
-    b->_nodes.insert(idx++, kills);
+    b->insert_node(kills, idx++);
     _cfg.map_node_to_block(kills, b);
     new_lrg(kills, max_lrg_id++);
   }
@@ -682,11 +682,11 @@ void PhaseChaitin::de_ssa() {
   uint lr_counter = 1;
   for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
     Block* block = _cfg.get_block(i);
-    uint cnt = block->_nodes.size();
+    uint cnt = block->number_of_nodes();
     // Handle all the normal Nodes in the block
     for( uint j = 0; j < cnt; j++ ) {
-      Node *n = block->_nodes[j];
+      Node *n = block->get_node(j);
       // Pre-color to the zero live range, or pick virtual register
       const RegMask &rm = n->out_RegMask();
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
@@ -710,8 +710,8 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
     Block* block = _cfg.get_block(i);
     // For all instructions
-    for (uint j = 1; j < block->_nodes.size(); j++) {
-      Node* n = block->_nodes[j];
+    for (uint j = 1; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
       uint input_edge_start =1; // Skip control most nodes
       if (n->is_Mach()) {
         input_edge_start = n->as_Mach()->oper_input_base();
@@ -1604,7 +1604,7 @@ void PhaseChaitin::fixup_spills() {
     // For all instructions in block
     uint last_inst = block->end_idx();
     for (uint j = 1; j <= last_inst; j++) {
-      Node* n = block->_nodes[j];
+      Node* n = block->get_node(j);
       // Dead instruction???
       assert( n->outcnt() != 0 ||// Nothing dead after post alloc
@@ -1641,7 +1641,7 @@ void PhaseChaitin::fixup_spills() {
             assert( cisc->oper_input_base() == 2, "Only adding one edge");
             cisc->ins_req(1,src);         // Requires a memory edge
           }
-          block->_nodes.map(j,cisc);      // Insert into basic block
+          block->map_node(cisc, j);       // Insert into basic block
           n->subsume_by(cisc, C); // Correct graph
           //
           ++_used_cisc_instructions;
@@ -1698,7 +1698,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
       // (where top() node is placed).
       base->init_req(0, _cfg.get_root_node());
       Block *startb = _cfg.get_block_for_node(C->top());
-      startb->_nodes.insert(startb->find_node(C->top()), base );
+      startb->insert_node(base, startb->find_node(C->top()));
       _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
     }
@@ -1743,9 +1743,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
   // Search the current block for an existing base-Phi
   Block *b = _cfg.get_block_for_node(derived);
   for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
-    Node *phi = b->_nodes[i];
+    Node *phi = b->get_node(i);
     if( !phi->is_Phi() ) {      // Found end of Phis with no match?
-      b->_nodes.insert( i, base ); // Must insert created Phi here as base
+      b->insert_node(base, i);  // Must insert created Phi here as base
       _cfg.map_node_to_block(base, b);
       new_lrg(base,maxlrg++);
       break;
@@ -1786,7 +1786,7 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
     IndexSet liveout(_live->live(block));
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
       // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
       // like to see in the same register.  Compare uses the loop-phi and so
@@ -1979,8 +1979,8 @@ void PhaseChaitin::dump(const Block *b) const {
   b->dump_head(&_cfg);
   // For all instructions
-  for( uint j = 0; j < b->_nodes.size(); j++ )
-    dump(b->_nodes[j]);
+  for( uint j = 0; j < b->number_of_nodes(); j++ )
+    dump(b->get_node(j));
   // Print live-out info at end of block
   if( _live ) {
     tty->print("Liveout: ");
@@ -2271,8 +2271,8 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
     int dump_once = 0;
     // For all instructions
-    for( uint j = 0; j < block->_nodes.size(); j++ ) {
-      Node *n = block->_nodes[j];
+    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
+      Node *n = block->get_node(j);
       if (_lrg_map.find_const(n) == lidx) {
         if (!dump_once++) {
           tty->cr();
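Every hunk in this file follows the same mechanical pattern: direct pokes at the formerly public Block::_nodes list are replaced by named accessors. A minimal sketch of what those accessors presumably look like on Block, assuming they are thin wrappers over the node list (the names are all taken from this diff; the bodies are assumptions):

    class Block {
      Node_List _nodes;  // now encapsulated; clients use the accessors below
     public:
      Node* get_node(uint i) const       { return _nodes[i]; }
      void  map_node(Node* n, uint i)    { _nodes.map(i, n); }
      void  insert_node(Node* n, uint i) { _nodes.insert(i, n); }
      void  remove_node(uint i)          { _nodes.remove(i); }
      uint  number_of_nodes() const      { return _nodes.size(); }
      Node* head() const                 { return _nodes[0]; }
    };

The same substitution recurs throughout the files below; only the call sites differ.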
View file
@@ -54,9 +54,9 @@ void PhaseCoalesce::dump() const {
     for( j=0; j<b->_num_succs; j++ )
       tty->print("B%d ",b->_succs[j]->_pre_order);
     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
-    uint cnt = b->_nodes.size();
+    uint cnt = b->number_of_nodes();
     for( j=0; j<cnt; j++ ) {
-      Node *n = b->_nodes[j];
+      Node *n = b->get_node(j);
       dump( n );
       tty->print("\t%s\t",n->Name());
@@ -152,7 +152,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
   // after the last use.  Last use is really first-use on a backwards scan.
   uint i = b->end_idx()-1;
   while(1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -174,7 +174,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
   // the last kill.  Thus it is the first kill on a backwards scan.
   i = b->end_idx()-1;
   while (1) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     // Check for end of virtual copies; this is also the end of the
     // parallel renaming effort.
     if (n->_idx < _unique) {
@@ -200,13 +200,13 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
       tmp ->set_req(idx,copy->in(idx));
       copy->set_req(idx,tmp);
       // Save source in temp early, before source is killed
-      b->_nodes.insert(kill_src_idx,tmp);
+      b->insert_node(tmp, kill_src_idx);
       _phc._cfg.map_node_to_block(tmp, b);
       last_use_idx++;
     }
     // Insert just after last use
-    b->_nodes.insert(last_use_idx+1,copy);
+    b->insert_node(copy, last_use_idx + 1);
 }
 void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
@@ -237,8 +237,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
     Block *b = _phc._cfg.get_block(i);
     uint cnt = b->num_preds();  // Number of inputs to the Phi
-    for( uint l = 1; l<b->_nodes.size(); l++ ) {
-      Node *n = b->_nodes[l];
+    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
+      Node *n = b->get_node(l);
       // Do not use removed-copies, use copied value instead
       uint ncnt = n->req();
@@ -260,7 +260,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
         if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
           n->replace_by(def);
           n->set_req(cidx,NULL);
-          b->_nodes.remove(l);
+          b->remove_node(l);
           l--;
           continue;
         }
@@ -321,13 +321,13 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
               m->as_Mach()->rematerialize()) {
             copy = m->clone();
             // Insert the copy in the basic block, just before us
-            b->_nodes.insert(l++, copy);
+            b->insert_node(copy, l++);
             l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
           } else {
             const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
             copy = new (C) MachSpillCopyNode(m, *rm, *rm);
             // Insert the copy in the basic block, just before us
-            b->_nodes.insert(l++, copy);
+            b->insert_node(copy, l++);
           }
           // Insert the copy in the use-def chain
           n->set_req(idx, copy);
@@ -339,7 +339,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
         } // End of is two-adr
         // Insert a copy at a debug use for a lrg which has high frequency
-        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
+        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
           // Walk the debug inputs to the node and check for lrg freq
           JVMState* jvms = n->jvms();
           uint debug_start = jvms ? jvms->debug_start() : 999999;
@@ -376,7 +376,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
             // Insert the copy in the use-def chain
             n->set_req(inpidx, copy );
             // Insert the copy in the basic block, just before us
-            b->_nodes.insert( l++, copy );
+            b->insert_node(copy, l++);
             // Extend ("register allocate") the names array for the copy.
             uint max_lrg_id = _phc._lrg_map.max_lrg_id();
             _phc.new_lrg(copy, max_lrg_id);
@@ -431,8 +431,8 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
     }
     // Visit all the Phis in successor block
-    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
-      Node *n = bs->_nodes[k];
+    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
+      Node *n = bs->get_node(k);
       if( !n->is_Phi() ) break;
       combine_these_two( n, n->in(j) );
     }
@@ -442,7 +442,7 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
   // Check _this_ block for 2-address instructions and copies.
   uint cnt = b->end_idx();
   for( i = 1; i<cnt; i++ ) {
-    Node *n = b->_nodes[i];
+    Node *n = b->get_node(i);
     uint idx;
     // 2-address instructions have a virtual Copy matching their input
     // to their output
@@ -490,10 +490,10 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
   dst_copy->set_req( didx, src_def );
   // Add copy to free list
   // _phc.free_spillcopy(b->_nodes[bindex]);
-  assert( b->_nodes[bindex] == dst_copy, "" );
+  assert( b->get_node(bindex) == dst_copy, "" );
   dst_copy->replace_by( dst_copy->in(didx) );
   dst_copy->set_req( didx, NULL);
-  b->_nodes.remove(bindex);
+  b->remove_node(bindex);
   if( bindex < b->_ihrp_index ) b->_ihrp_index--;
   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
@@ -523,8 +523,8 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
       bindex2 = b2->end_idx()-1;
     }
     // Get prior instruction
-    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
-    Node *x = b2->_nodes[bindex2];
+    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
+    Node *x = b2->get_node(bindex2);
     if( x == prev_copy ) {      // Previous copy in copy chain?
       if( prev_copy == src_copy)// Found end of chain and all interferences
         break;                  // So break out of loop
@@ -769,14 +769,14 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
 // Conservative (but pessimistic) copy coalescing of a single block
 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   // Bail out on infrequent blocks
-  if (b->is_uncommon(&_phc._cfg)) {
+  if (_phc._cfg.is_uncommon(b)) {
     return;
   }
   // Check this block for copies.
   for( uint i = 1; i<b->end_idx(); i++ ) {
     // Check for actual copies on inputs.  Coalesce a copy into its
     // input if use and copy's input are compatible.
-    Node *copy1 = b->_nodes[i];
+    Node *copy1 = b->get_node(i);
     uint idx1 = copy1->is_Copy();
     if( !idx1 ) continue;      // Not a copy
View file
@@ -1297,6 +1297,10 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
   // Array pointers need some flattening
   const TypeAryPtr *ta = tj->isa_aryptr();
+  if (ta && ta->is_stable()) {
+    // Erase stability property for alias analysis.
+    tj = ta = ta->cast_to_stable(false);
+  }
   if( ta && is_known_inst ) {
     if ( offset != Type::OffsetBot &&
          offset > arrayOopDesc::length_offset_in_bytes() ) {
@@ -1497,6 +1501,7 @@ void Compile::AliasType::Init(int i, const TypePtr* at) {
   _index = i;
   _adr_type = at;
   _field = NULL;
+  _element = NULL;
   _is_rewritable = true; // default
   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
   if (atoop != NULL && atoop->is_known_instance()) {
@@ -1615,6 +1620,16 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr
         && flat->is_instptr()->klass() == env()->Class_klass())
       alias_type(idx)->set_rewritable(false);
   }
+  if (flat->isa_aryptr()) {
+#ifdef ASSERT
+    const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    // (T_BYTE has the weakest alignment and size restrictions...)
+    assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
+#endif
+    if (flat->offset() == TypePtr::OffsetBot) {
+      alias_type(idx)->set_element(flat->is_aryptr()->elem());
+    }
+  }
   if (flat->isa_klassptr()) {
     if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
       alias_type(idx)->set_rewritable(false);
@@ -1677,7 +1692,7 @@ Compile::AliasType* Compile::alias_type(ciField* field) {
   else
     t = TypeOopPtr::make_from_klass_raw(field->holder());
   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
-  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
+  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
   return atp;
 }
@@ -2258,7 +2273,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
     if (block->is_connector() && !Verbose) {
       continue;
     }
-    n = block->_nodes[0];
+    n = block->head();
     if (pcs && n->_idx < pc_limit) {
       tty->print("%3.3x   ", pcs[n->_idx]);
     } else {
@@ -2273,12 +2288,12 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
     // For all instructions
     Node *delay = NULL;
-    for (uint j = 0; j < block->_nodes.size(); j++) {
+    for (uint j = 0; j < block->number_of_nodes(); j++) {
       if (VMThread::should_terminate()) {
         cut_short = true;
         break;
       }
-      n = block->_nodes[j];
+      n = block->get_node(j);
       if (valid_bundle_info(n)) {
         Bundle* bundle = node_bundling(n);
         if (bundle->used_in_unconditional_delay()) {
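The two @Stable pieces in this file pull in opposite directions: flatten_alias_type() erases stability so that a stable and a non-stable view of the same array land in one alias class, while AliasType separately records the element type (and, for fields, the non-rewritable bit) so that later phases may fold loads. A hedged sketch of the invariant the erasure buys, assuming a Compile* and some array pointer type in hand (the helper is illustrative, not part of the changeset):

    // A stable view of an array must share the alias class of the plain view,
    // or a store through one view would be invisible to loads through the other.
    static void verify_stable_alias_class(Compile* C, const TypeAryPtr* plain) {
      const TypeAryPtr* stable_view = plain->cast_to_stable(true);
      assert(C->get_alias_index(stable_view) == C->get_alias_index(plain),
             "stability is erased for alias analysis");
    }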
View file
@@ -72,6 +72,7 @@ class Scope;
 class StartNode;
 class SafePointNode;
 class JVMState;
+class Type;
 class TypeData;
 class TypePtr;
 class TypeOopPtr;
@@ -119,6 +120,7 @@ class Compile : public Phase {
     int             _index;         // unique index, used with MergeMemNode
     const TypePtr*  _adr_type;      // normalized address type
     ciField*        _field;         // relevant instance field, or null if none
+    const Type*     _element;       // relevant array element type, or null if none
     bool            _is_rewritable; // false if the memory is write-once only
     int             _general_index; // if this is type is an instance, the general
                                     // type that this is an instance of
@@ -129,6 +131,7 @@ class Compile : public Phase {
     int             index()         const { return _index; }
     const TypePtr*  adr_type()      const { return _adr_type; }
     ciField*        field()         const { return _field; }
+    const Type*     element()       const { return _element; }
     bool            is_rewritable() const { return _is_rewritable; }
     bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
     int             general_index() const { return (_general_index != 0) ? _general_index : _index; }
@@ -137,7 +140,14 @@ class Compile : public Phase {
     void set_field(ciField* f) {
       assert(!_field,"");
       _field = f;
-      if (f->is_final())  _is_rewritable = false;
+      if (f->is_final() || f->is_stable()) {
+        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
+        _is_rewritable = false;
+      }
+    }
+    void set_element(const Type* e) {
+      assert(_element == NULL, "");
+      _element = e;
     }
     void print_on(outputStream* st) PRODUCT_RETURN;
View file
@@ -211,21 +211,21 @@ class Block_Stack {
 uint Block_Stack::most_frequent_successor( Block *b ) {
   uint freq_idx = 0;
   int eidx = b->end_idx();
-  Node *n = b->_nodes[eidx];
+  Node *n = b->get_node(eidx);
   int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
   switch( op ) {
   case Op_CountedLoopEnd:
   case Op_If: {               // Split frequency amongst children
     float prob = n->as_MachIf()->_prob;
     // Is succ[0] the TRUE branch or the FALSE branch?
-    if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
+    if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
       prob = 1.0f - prob;
     freq_idx = prob < PROB_FAIR;      // freq=1 for succ[0] < 0.5 prob
     break;
   }
   case Op_Catch:                // Split frequency amongst children
     for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
-      if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
+      if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
        break;
     // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
     if( freq_idx == b->_num_succs ) freq_idx = 0;
View file
@@ -102,12 +102,12 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
     uint j = 0;
     if (pb->_num_succs != 1) {  // More then 1 successor?
       // Search for successor
-      uint max = pb->_nodes.size();
+      uint max = pb->number_of_nodes();
       assert( max > 1, "" );
       uint start = max - pb->_num_succs;
       // Find which output path belongs to projection
       for (j = start; j < max; j++) {
-        if( pb->_nodes[j] == in0 )
+        if( pb->get_node(j) == in0 )
           break;
       }
       assert( j < max, "must find" );
@@ -1027,8 +1027,8 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
   Block* least       = LCA;
   double least_freq  = least->_freq;
   uint target        = get_latency_for_node(self);
-  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
-  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
+  uint start_latency = get_latency_for_node(LCA->head());
+  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
   bool in_latency    = (target <= start_latency);
   const Block* root_block = get_block_for_node(_root);
@@ -1049,9 +1049,9 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
-      LCA->_nodes[0]->_idx,
+      LCA->head()->_idx,
       start_latency,
-      LCA->_nodes[LCA->end_idx()]->_idx,
+      LCA->get_node(LCA->end_idx())->_idx,
       end_latency,
       least_freq);
   }
@@ -1074,14 +1074,14 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
     if (mach && LCA == root_block)
       break;
-    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
+    uint start_lat = get_latency_for_node(LCA->head());
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
+    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
-        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
+        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
     }
 #endif
     cand_cnt++;
@@ -1342,7 +1342,7 @@ void PhaseCFG::global_code_motion() {
     Node* proj = _matcher._null_check_tests[i];
     Node* val  = _matcher._null_check_tests[i + 1];
     Block* block = get_block_for_node(proj);
-    block->implicit_null_check(this, proj, val, allowed_reasons);
+    implicit_null_check(block, proj, val, allowed_reasons);
     // The implicit_null_check will only perform the transformation
     // if the null branch is truly uncommon, *and* it leads to an
     // uncommon trap.  Combined with the too_many_traps guards
@@ -1363,7 +1363,7 @@ void PhaseCFG::global_code_motion() {
   visited.Clear();
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
+    if (!schedule_local(block, ready_cnt, visited)) {
       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
         C->record_method_not_compilable("local schedule failed");
       }
@@ -1375,7 +1375,7 @@ void PhaseCFG::global_code_motion() {
   // clone the instructions on all paths below the Catch.
   for (uint i = 0; i < number_of_blocks(); i++) {
     Block* block = get_block(i);
-    block->call_catch_cleanup(this, C);
+    call_catch_cleanup(block);
   }
 #ifndef PRODUCT
@@ -1726,7 +1726,7 @@ void CFGLoop::compute_freq() {
 // Determine the probability of reaching successor 'i' from the receiver block.
 float Block::succ_prob(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1761,7 +1761,7 @@ float Block::succ_prob(uint i) {
     float prob  = n->as_MachIf()->_prob;
     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
     // If succ[i] is the FALSE branch, invert path info
-    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
+    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
       return 1.0f - prob; // not taken
     } else {
       return prob; // taken
@@ -1773,7 +1773,7 @@ float Block::succ_prob(uint i) {
     return 1.0f/_num_succs;
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     if (ci->_con == CatchProjNode::fall_through_index) {
       // Fall-thru path gets the lion's share.
       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
@@ -1810,7 +1810,7 @@ float Block::succ_prob(uint i) {
 // Return the number of fall-through candidates for a block
 int Block::num_fall_throughs() {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
   int op = n->Opcode();
   if (n->is_Mach()) {
@@ -1834,7 +1834,7 @@ int Block::num_fall_throughs() {
   case Op_Catch: {
     for (uint i = 0; i < _num_succs; i++) {
-      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
       if (ci->_con == CatchProjNode::fall_through_index) {
         return 1;
       }
@@ -1862,14 +1862,14 @@ int Block::num_fall_throughs() {
 // Return true if a specific successor could be fall-through target.
 bool Block::succ_fall_through(uint i) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
   int op = n->Opcode();
   if (n->is_Mach()) {
     if (n->is_MachNullCheck()) {
       // In theory, either side can fall-thru, for simplicity sake,
       // let's say only the false branch can now.
-      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
+      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
     }
     op = n->as_Mach()->ideal_Opcode();
   }
@@ -1883,7 +1883,7 @@ bool Block::succ_fall_through(uint i) {
     return true;
   case Op_Catch: {
-    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
+    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
     return ci->_con == CatchProjNode::fall_through_index;
   }
@@ -1907,7 +1907,7 @@ bool Block::succ_fall_through(uint i) {
 // Update the probability of a two-branch to be uncommon
 void Block::update_uncommon_branch(Block* ub) {
   int eidx = end_idx();
-  Node *n = _nodes[eidx];  // Get ending Node
+  Node *n = get_node(eidx);  // Get ending Node
   int op = n->as_Mach()->ideal_Opcode();
@@ -1923,7 +1923,7 @@ void Block::update_uncommon_branch(Block* ub) {
   // If ub is the true path, make the proability small, else
   // ub is the false path, and make the probability large
-  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
+  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
   // Get existing probability
   float p = n->as_MachIf()->_prob;
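The other half of this changeset moves scheduling passes that used to live on Block (and therefore had to be handed a PhaseCFG*) onto PhaseCFG itself, which now receives the Block* explicitly. The new signatures, collected from the call sites above and the definitions in the file after next (a summary sketch of the header, not a verbatim excerpt):

    class PhaseCFG {
     public:
      void  implicit_null_check(Block* block, Node* proj, Node* val, int allowed_reasons);
      bool  schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
      void  call_catch_cleanup(Block* block);
      Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt,
                    VectorSet& next_call, uint sched_slot);
      void  set_next_call(Block* block, Node* n, VectorSet& next_call);
      void  needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
    };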
View file
@@ -61,6 +61,7 @@ void GraphKit::gen_stub(address C_function,
   JVMState* jvms = new (C) JVMState(0);
   jvms->set_bci(InvocationEntryBci);
   jvms->set_monoff(max_map);
+  jvms->set_scloff(max_map);
   jvms->set_endoff(max_map);
   {
     SafePointNode *map = new (C) SafePointNode( max_map, jvms );
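Stub frames now have to initialize the new scalar-object offset as well. For orientation, the debug-info edges of a SafePoint are carved into consecutive regions by the JVMState offsets; setting monoff, scloff and endoff all to max_map leaves every region past the locals empty. A sketch of the layout (locoff/stkoff are standard JVMState accessors, not part of this diff):

    // [ locals | expression stack | monitors | scalar-replaced objects ]
    //  locoff    stkoff             monoff     scloff                  endoff
    //
    // first_index() of a SafePointScalarObjectNode is resolved against scloff.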
View file
@@ -1501,6 +1501,25 @@ void GraphKit::pre_barrier(bool do_load,
   }
 }
+
+bool GraphKit::can_move_pre_barrier() const {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  switch (bs->kind()) {
+    case BarrierSet::G1SATBCT:
+    case BarrierSet::G1SATBCTLogging:
+      return true; // Can move it if no safepoint
+
+    case BarrierSet::CardTableModRef:
+    case BarrierSet::CardTableExtension:
+    case BarrierSet::ModRef:
+      return true; // There is no pre-barrier
+
+    case BarrierSet::Other:
+    default      :
+      ShouldNotReachHere();
+  }
+  return false;
+}
+
 void GraphKit::post_barrier(Node* ctl,
                             Node* store,
                             Node* obj,
@@ -3551,6 +3570,8 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
   } else {
     // In this case both val_type and alias_idx are unused.
     assert(pre_val != NULL, "must be loaded already");
+    // Nothing to be done if pre_val is null.
+    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
     assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
   }
   assert(bt == T_OBJECT, "or we shouldn't be here");
@@ -3595,7 +3616,7 @@ void GraphKit::g1_write_barrier_pre(bool do_load,
   if (do_load) {
     // load original value
     // alias_idx correct??
-    pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
+    pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
   }
   // if (pre_val != NULL)
@@ -3804,8 +3825,13 @@ Node* GraphKit::load_String_value(Node* ctrl, Node* str) {
                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
-  return make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                   value_type, T_OBJECT, value_field_idx);
+  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
+                         value_type, T_OBJECT, value_field_idx);
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    load = cast_array_to_stable(load, value_type);
+  }
+  return load;
 }
 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
@@ -3823,9 +3849,6 @@ void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
-                                                  TypeAry::make(TypeInt::CHAR,TypeInt::POS),
-                                                  ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, value_offset),
                   value, T_OBJECT, value_field_idx);
@@ -3840,3 +3863,9 @@ void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
                   value, T_INT, count_field_idx);
 }
+
+Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
+  // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
+  // assumption of CCP analysis.
+  return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
+}
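can_move_pre_barrier() has no caller in this hunk; presumably it lets intrinsics that perform an atomic exchange emit the SATB pre-barrier once, after the operation, from the returned old value, instead of pre-loading it. A heavily hedged sketch of that pattern (the helper and its call site are hypothetical; only can_move_pre_barrier() and the do_load == false pre-barrier path appear in this changeset):

    // Hypothetical: after building an atomic exchange that yields the old oop
    // value, emit the deferred pre-barrier if the collector permits moving it.
    static void emit_deferred_pre_barrier(GraphKit* kit, Node* old_val) {
      if (kit->can_move_pre_barrier()) {
        // do_load == false: pass the exchange result directly as pre_val,
        // so no separate load of the old value is needed. No safepoint may
        // intervene between the exchange and this barrier.
        kit->pre_barrier(false /* do_load */, kit->control(),
                         NULL, NULL, max_juint, NULL, NULL,
                         old_val /* pre_val */, T_OBJECT);
      }
    }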
View file
@@ -695,6 +695,10 @@ class GraphKit : public Phase {
   void write_barrier_post(Node *store, Node* obj,
                           Node* adr, uint adr_idx, Node* val, bool use_precise);
+
+  // Allow reordering of pre-barrier with oop store and/or post-barrier.
+  // Used for load_store operations which loads old value.
+  bool can_move_pre_barrier() const;
   // G1 pre/post barriers
   void g1_write_barrier_pre(bool do_load,
                             Node* obj,
@@ -832,6 +836,9 @@ class GraphKit : public Phase {
   // Insert a loop predicate into the graph
   void add_predicate(int nargs = 0);
   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
+
+  // Produce new array node of stable type
+  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
 };
 // Helper class to support building of control flow branches. Upon
View file
@@ -639,8 +639,8 @@ void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set)
     // reachable but are in the CFG so add them here.
     for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
       Block* block = C->cfg()->get_block(i);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
-        nodeStack.push(block->_nodes[s]);
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
+        nodeStack.push(block->get_node(s));
       }
     }
   }
@@ -713,9 +713,9 @@ void IdealGraphPrinter::print(Compile* compile, const char *name, Node *node, in
       tail(SUCCESSORS_ELEMENT);
       head(NODES_ELEMENT);
-      for (uint s = 0; s < block->_nodes.size(); s++) {
+      for (uint s = 0; s < block->number_of_nodes(); s++) {
         begin_elem(NODE_ELEMENT);
-        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
+        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
         end_elem();
       }
       tail(NODES_ELEMENT);
View file
@@ -319,7 +319,7 @@ void PhaseChaitin::build_ifg_virtual( ) {
     // value is then removed from the live-ness set and it's inputs are
     // added to the live-ness set.
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -456,7 +456,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // Compute first nonphi node index
     uint first_inst;
     for (first_inst = 1; first_inst < last_inst; first_inst++) {
-      if (!block->_nodes[first_inst]->is_Phi()) {
+      if (!block->get_node(first_inst)->is_Phi()) {
         break;
       }
     }
@@ -464,15 +464,15 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // Spills could be inserted before CreateEx node which should be
     // first instruction in block after Phis. Move CreateEx up.
     for (uint insidx = first_inst; insidx < last_inst; insidx++) {
-      Node *ex = block->_nodes[insidx];
+      Node *ex = block->get_node(insidx);
       if (ex->is_SpillCopy()) {
         continue;
       }
       if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
         // If the CreateEx isn't above all the MachSpillCopies
         // then move it to the top.
-        block->_nodes.remove(insidx);
-        block->_nodes.insert(first_inst, ex);
+        block->remove_node(insidx);
+        block->insert_node(ex, first_inst);
       }
       // Stop once a CreateEx or any other node is found
       break;
@@ -523,7 +523,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // to the live-ness set.
     uint j;
     for (j = last_inst + 1; j > 1; j--) {
-      Node* n = block->_nodes[j - 1];
+      Node* n = block->get_node(j - 1);
       // Get value being defined
       uint r = _lrg_map.live_range_id(n);
@@ -541,7 +541,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
         if( !n->is_Proj() ||
             // Could also be a flags-projection of a dead ADD or such.
             (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-          block->_nodes.remove(j - 1);
+          block->remove_node(j - 1);
           if (lrgs(r)._def == n) {
            lrgs(r)._def = 0;
          }
@@ -605,7 +605,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
           // (j - 1) is index for current instruction 'n'
           Node *m = n;
           for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
-            m = block->_nodes[i];
+            m = block->get_node(i);
           }
           if (m == single_use) {
             lrgs(r)._area = 0.0;
@@ -772,20 +772,20 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
     // Compute high pressure indice; avoid landing in the middle of projnodes
     j = hrp_index[0];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_ihrp_index = j;
     j = hrp_index[1];
-    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
-      Node* cur = block->_nodes[j];
+    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
+      Node* cur = block->get_node(j);
       while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
         j--;
-        cur = block->_nodes[j];
+        cur = block->get_node(j);
       }
     }
     block->_fhrp_index = j;
View file
@@ -58,14 +58,14 @@
 // The proj is the control projection for the not-null case.
 // The val is the pointer being checked for nullness or
 // decodeHeapOop_not_null node if it did not fold into address.
-void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
+void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
   // Assume if null check need for 0 offset then always needed
   // Intel solaris doesn't support any null checks yet and no
   // mechanism exists (yet) to set the switches at an os_cpu level
   if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
   // Make sure the ptr-is-null path appears to be uncommon!
-  float f = end()->as_MachIf()->_prob;
+  float f = block->end()->as_MachIf()->_prob;
   if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
   if( f > PROB_UNLIKELY_MAG(4) ) return;
@@ -75,13 +75,13 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   // Get the successor block for if the test ptr is non-null
   Block* not_null_block;  // this one goes with the proj
   Block* null_block;
-  if (_nodes[_nodes.size()-1] == proj) {
-    null_block     = _succs[0];
-    not_null_block = _succs[1];
+  if (block->get_node(block->number_of_nodes()-1) == proj) {
+    null_block     = block->_succs[0];
+    not_null_block = block->_succs[1];
   } else {
-    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
-    not_null_block = _succs[0];
-    null_block     = _succs[1];
+    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
+    not_null_block = block->_succs[0];
+    null_block     = block->_succs[1];
   }
   while (null_block->is_Empty() == Block::empty_with_goto) {
     null_block     = null_block->_succs[0];
@@ -93,8 +93,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   // detect failure of this optimization, as in 6366351.)
   {
     bool found_trap = false;
-    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
-      Node* nn = null_block->_nodes[i1];
+    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
+      Node* nn = null_block->get_node(i1);
       if (nn->is_MachCall() &&
           nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
         const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
@@ -237,20 +237,20 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
     }
     // Check ctrl input to see if the null-check dominates the memory op
-    Block *cb = cfg->get_block_for_node(mach);
+    Block *cb = get_block_for_node(mach);
     cb = cb->_idom;             // Always hoist at least 1 block
     if( !was_store ) {          // Stores can be hoisted only one block
-      while( cb->_dom_depth > (_dom_depth + 1))
+      while( cb->_dom_depth > (block->_dom_depth + 1))
         cb = cb->_idom;         // Hoist loads as far as we want
       // The non-null-block should dominate the memory op, too. Live
       // range spilling will insert a spill in the non-null-block if it is
       // needs to spill the memory op for an implicit null check.
-      if (cb->_dom_depth == (_dom_depth + 1)) {
+      if (cb->_dom_depth == (block->_dom_depth + 1)) {
         if (cb != not_null_block) continue;
         cb = cb->_idom;
       }
     }
-    if( cb != this ) continue;
+    if( cb != block ) continue;
     // Found a memory user; see if it can be hoisted to check-block
     uint vidx = 0;              // Capture index of value into memop
@@ -262,8 +262,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
         if( is_decoden ) continue;
       }
       // Block of memory-op input
-      Block *inb = cfg->get_block_for_node(mach->in(j));
-      Block *b = this;          // Start from nul check
+      Block *inb = get_block_for_node(mach->in(j));
+      Block *b = block;         // Start from nul check
       while( b != inb && b->_dom_depth > inb->_dom_depth )
         b = b->_idom;           // search upwards for input
       // See if input dominates null check
@@ -272,28 +272,28 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
     }
     if( j > 0 )
       continue;
-    Block *mb = cfg->get_block_for_node(mach);
+    Block *mb = get_block_for_node(mach);
     // Hoisting stores requires more checks for the anti-dependence case.
     // Give up hoisting if we have to move the store past any load.
     if( was_store ) {
       Block *b = mb;            // Start searching here for a local load
       // mach use (faulting) trying to hoist
       // n might be blocker to hoisting
-      while( b != this ) {
+      while( b != block ) {
         uint k;
-        for( k = 1; k < b->_nodes.size(); k++ ) {
-          Node *n = b->_nodes[k];
+        for( k = 1; k < b->number_of_nodes(); k++ ) {
+          Node *n = b->get_node(k);
           if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
-        if( k < b->_nodes.size() )
+        if( k < b->number_of_nodes() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
-        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
+        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
      }
-      if( b != this ) continue;
+      if( b != block ) continue;
    }
     // Make sure this memory op is not already being used for a NullCheck
@@ -303,7 +303,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
     // Found a candidate!  Pick one with least dom depth - the highest
     // in the dom tree should be closest to the null check.
-    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
+    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
       best = mach;
       bidx = vidx;
     }
@@ -319,46 +319,45 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   if( is_decoden ) {
     // Check if we need to hoist decodeHeapOop_not_null first.
-    Block *valb = cfg->get_block_for_node(val);
-    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
+    Block *valb = get_block_for_node(val);
+    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
       // Hoist it up to the end of the test block.
       valb->find_remove(val);
-      this->add_inst(val);
-      cfg->map_node_to_block(val, this);
+      block->add_inst(val);
+      map_node_to_block(val, block);
       // DecodeN on x86 may kill flags. Check for flag-killing projections
       // that also need to be hoisted.
       for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
         Node* n = val->fast_out(j);
         if( n->is_MachProj() ) {
-          cfg->get_block_for_node(n)->find_remove(n);
-          this->add_inst(n);
-          cfg->map_node_to_block(n, this);
+          get_block_for_node(n)->find_remove(n);
+          block->add_inst(n);
+          map_node_to_block(n, block);
         }
       }
     }
   }
   // Hoist the memory candidate up to the end of the test block.
-  Block *old_block = cfg->get_block_for_node(best);
+  Block *old_block = get_block_for_node(best);
   old_block->find_remove(best);
-  add_inst(best);
-  cfg->map_node_to_block(best, this);
+  block->add_inst(best);
+  map_node_to_block(best, block);
   // Move the control dependence
-  if (best->in(0) && best->in(0) == old_block->_nodes[0])
-    best->set_req(0, _nodes[0]);
+  if (best->in(0) && best->in(0) == old_block->head())
+    best->set_req(0, block->head());
   // Check for flag-killing projections that also need to be hoisted
   // Should be DU safe because no edge updates.
   for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
     Node* n = best->fast_out(j);
     if( n->is_MachProj() ) {
-      cfg->get_block_for_node(n)->find_remove(n);
-      add_inst(n);
-      cfg->map_node_to_block(n, this);
+      get_block_for_node(n)->find_remove(n);
+      block->add_inst(n);
+      map_node_to_block(n, block);
     }
   }
-  Compile *C = cfg->C;
   // proj==Op_True --> ne test; proj==Op_False --> eq test.
   // One of two graph shapes got matched:
   //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
@@ -368,10 +367,10 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   // We need to flip the projections to keep the same semantics.
   if( proj->Opcode() == Op_IfTrue ) {
     // Swap order of projections in basic block to swap branch targets
-    Node *tmp1 = _nodes[end_idx()+1];
-    Node *tmp2 = _nodes[end_idx()+2];
-    _nodes.map(end_idx()+1, tmp2);
-    _nodes.map(end_idx()+2, tmp1);
+    Node *tmp1 = block->get_node(block->end_idx()+1);
+    Node *tmp2 = block->get_node(block->end_idx()+2);
+    block->map_node(tmp2, block->end_idx()+1);
+    block->map_node(tmp1, block->end_idx()+2);
     Node *tmp = new (C) Node(C->top()); // Use not NULL input
     tmp1->replace_by(tmp);
     tmp2->replace_by(tmp1);
@@ -384,8 +383,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   // it as well.
   Node *old_tst = proj->in(0);
   MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
-  _nodes.map(end_idx(),nul_chk);
-  cfg->map_node_to_block(nul_chk, this);
+  block->map_node(nul_chk, block->end_idx());
+  map_node_to_block(nul_chk, block);
   // Redirect users of old_test to nul_chk
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
@@ -393,8 +392,8 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
   for (uint i3 = 0; i3 < old_tst->req(); i3++)
     old_tst->set_req(i3, NULL);
-  cfg->latency_from_uses(nul_chk);
-  cfg->latency_from_uses(best);
+  latency_from_uses(nul_chk);
+  latency_from_uses(best);
 }
@@ -408,7 +407,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
 // remaining cases (most), choose the instruction with the greatest latency
 // (that is, the most number of pseudo-cycles required to the end of the
 // routine). If there is a tie, choose the instruction with the most inputs.
-Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
+Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
   // If only a single entry on the stack, use it
   uint cnt = worklist.size();
@@ -442,7 +441,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
     }
     // Final call in a block must be adjacent to 'catch'
-    Node *e = end();
+    Node *e = block->end();
     if( e->is_Catch() && e->in(0)->in(0) == n )
       continue;
@@ -468,7 +467,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
       Node* use = n->fast_out(j);
       // The use is a conditional branch, make them adjacent
-      if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
+      if (use->is_MachIf() && get_block_for_node(use) == block) {
        found_machif = true;
        break;
      }
@@ -501,7 +500,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
      n_choice = 1;
    }
-    uint n_latency = cfg->get_latency_for_node(n);
+    uint n_latency = get_latency_for_node(n);
     uint n_score = n->req();   // Many inputs get high score to break ties
     // Keep best latency found
@@ -529,13 +528,13 @@
 //------------------------------set_next_call----------------------------------
-void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
   if( next_call.test_set(n->_idx) ) return;
   for( uint i=0; i<n->len(); i++ ) {
     Node *m = n->in(i);
     if( !m ) continue;  // must see all nodes in block that precede call
-    if (cfg->get_block_for_node(m) == this) {
-      set_next_call(m, next_call, cfg);
+    if (get_block_for_node(m) == block) {
+      set_next_call(block, m, next_call);
     }
   }
 }
@@ -546,24 +545,26 @@
 // next subroutine call get priority - basically it moves things NOT needed
 // for the next call till after the call.  This prevents me from trying to
 // carry lots of stuff live across a call.
-void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
+void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
   // Find the next control-defining Node in this block
   Node* call = NULL;
   for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
     Node* m = this_call->fast_out(i);
-    if(cfg->get_block_for_node(m) == this && // Local-block user
+    if (get_block_for_node(m) == block &&    // Local-block user
        m != this_call &&                     // Not self-start node
-       m->is_MachCall() )
+       m->is_MachCall()) {
       call = m;
break; break;
}
} }
if (call == NULL) return; // No next call (e.g., block end is near) if (call == NULL) return; // No next call (e.g., block end is near)
// Set next-call for all inputs to this call // Set next-call for all inputs to this call
set_next_call(call, next_call, cfg); set_next_call(block, call, next_call);
} }
//------------------------------add_call_kills------------------------------------- //------------------------------add_call_kills-------------------------------------
void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) { // helper function that adds caller save registers to MachProjNode
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// Fill in the kill mask for the call // Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) { for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
if( !regs.Member(r) ) { // Not already defined by the call if( !regs.Member(r) ) { // Not already defined by the call
@ -579,7 +580,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p
//------------------------------sched_call------------------------------------- //------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) { uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
RegMask regs; RegMask regs;
// Schedule all the users of the call right now. All the users are // Schedule all the users of the call right now. All the users are
@ -592,18 +593,18 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
ready_cnt.at_put(n->_idx, n_cnt); ready_cnt.at_put(n->_idx, n_cnt);
assert( n_cnt == 0, "" ); assert( n_cnt == 0, "" );
// Schedule next to call // Schedule next to call
_nodes.map(node_cnt++, n); block->map_node(n, node_cnt++);
// Collect defined registers // Collect defined registers
regs.OR(n->out_RegMask()); regs.OR(n->out_RegMask());
// Check for scheduling the next control-definer // Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL ) if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits // Warm up next pile of heuristic bits
needed_for_next_call(n, next_call, cfg); needed_for_next_call(block, n, next_call);
// Children of projections are now all ready // Children of projections are now all ready
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); // Get user Node* m = n->fast_out(j); // Get user
if(cfg->get_block_for_node(m) != this) { if(get_block_for_node(m) != block) {
continue; continue;
} }
if( m->is_Phi() ) continue; if( m->is_Phi() ) continue;
@ -617,14 +618,14 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
// Act as if the call defines the Frame Pointer. // Act as if the call defines the Frame Pointer.
// Certainly the FP is alive and well after the call. // Certainly the FP is alive and well after the call.
regs.Insert(matcher.c_frame_pointer()); regs.Insert(_matcher.c_frame_pointer());
// Set all registers killed and not already defined by the call. // Set all registers killed and not already defined by the call.
uint r_cnt = mcall->tf()->range()->cnt(); uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode(); int op = mcall->ideal_Opcode();
MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
cfg->map_node_to_block(proj, this); map_node_to_block(proj, block);
_nodes.insert(node_cnt++, proj); block->insert_node(proj, node_cnt++);
// Select the right register save policy. // Select the right register save policy.
const char * save_policy; const char * save_policy;
@ -633,13 +634,13 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
case Op_CallLeaf: case Op_CallLeaf:
case Op_CallLeafNoFP: case Op_CallLeafNoFP:
// Calling C code so use C calling convention // Calling C code so use C calling convention
save_policy = matcher._c_reg_save_policy; save_policy = _matcher._c_reg_save_policy;
break; break;
case Op_CallStaticJava: case Op_CallStaticJava:
case Op_CallDynamicJava: case Op_CallDynamicJava:
// Calling Java code so use Java calling convention // Calling Java code so use Java calling convention
save_policy = matcher._register_save_policy; save_policy = _matcher._register_save_policy;
break; break;
default: default:
@ -674,44 +675,46 @@ uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_Lis
//------------------------------schedule_local--------------------------------- //------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler. // Topological sort within a block. Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) { bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
// Already "sorted" are the block start Node (as the first entry), and // Already "sorted" are the block start Node (as the first entry), and
// the block-ending Node and any trailing control projections. We leave // the block-ending Node and any trailing control projections. We leave
// these alone. PhiNodes and ParmNodes are made to follow the block start // these alone. PhiNodes and ParmNodes are made to follow the block start
// Node. Everything else gets topo-sorted. // Node. Everything else gets topo-sorted.
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (trace_opto_pipelining()) {
tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order); tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
for (uint i = 0;i < _nodes.size();i++) { for (uint i = 0;i < block->number_of_nodes(); i++) {
tty->print("# "); tty->print("# ");
_nodes[i]->fast_dump(); block->get_node(i)->fast_dump();
} }
tty->print_cr("#"); tty->print_cr("#");
} }
#endif #endif
// RootNode is already sorted // RootNode is already sorted
if( _nodes.size() == 1 ) return true; if (block->number_of_nodes() == 1) {
return true;
}
// Move PhiNodes and ParmNodes from 1 to cnt up to the start // Move PhiNodes and ParmNodes from 1 to cnt up to the start
uint node_cnt = end_idx(); uint node_cnt = block->end_idx();
uint phi_cnt = 1; uint phi_cnt = 1;
uint i; uint i;
for( i = 1; i<node_cnt; i++ ) { // Scan for Phi for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
Node *n = _nodes[i]; Node *n = block->get_node(i);
if( n->is_Phi() || // Found a PhiNode or ParmNode if( n->is_Phi() || // Found a PhiNode or ParmNode
(n->is_Proj() && n->in(0) == head()) ) { (n->is_Proj() && n->in(0) == block->head()) ) {
// Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
_nodes.map(i,_nodes[phi_cnt]); block->map_node(block->get_node(phi_cnt), i);
_nodes.map(phi_cnt++,n); // swap Phi/Parm up front block->map_node(n, phi_cnt++); // swap Phi/Parm up front
} else { // All others } else { // All others
// Count block-local inputs to 'n' // Count block-local inputs to 'n'
uint cnt = n->len(); // Input count uint cnt = n->len(); // Input count
uint local = 0; uint local = 0;
for( uint j=0; j<cnt; j++ ) { for( uint j=0; j<cnt; j++ ) {
Node *m = n->in(j); Node *m = n->in(j);
if( m && cfg->get_block_for_node(m) == this && !m->is_top() ) if( m && get_block_for_node(m) == block && !m->is_top() )
local++; // One more block-local input local++; // One more block-local input
} }
ready_cnt.at_put(n->_idx, local); // Count em up ready_cnt.at_put(n->_idx, local); // Count em up
@ -723,7 +726,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
for (uint prec = n->req(); prec < n->len(); prec++) { for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec); Node* oop_store = n->in(prec);
if (oop_store != NULL) { if (oop_store != NULL) {
assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
} }
} }
} }
@ -747,16 +750,16 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
} }
} }
} }
for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
ready_cnt.at_put(_nodes[i2]->_idx, 0); ready_cnt.at_put(block->get_node(i2)->_idx, 0);
// All the prescheduled guys do not hold back internal nodes // All the prescheduled guys do not hold back internal nodes
uint i3; uint i3;
for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled for(i3 = 0; i3<phi_cnt; i3++ ) { // For all pre-scheduled
Node *n = _nodes[i3]; // Get pre-scheduled Node *n = block->get_node(i3); // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); Node* m = n->fast_out(j);
if (cfg->get_block_for_node(m) == this) { // Local-block user if (get_block_for_node(m) == block) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1; int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
} }
@ -767,7 +770,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
// Make a worklist // Make a worklist
Node_List worklist; Node_List worklist;
for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist
Node *m = _nodes[i4]; Node *m = block->get_node(i4);
if( !ready_cnt.at(m->_idx) ) { // Zero ready count? if( !ready_cnt.at(m->_idx) ) { // Zero ready count?
if (m->is_iteratively_computed()) { if (m->is_iteratively_computed()) {
// Push induction variable increments last to allow other uses // Push induction variable increments last to allow other uses
@ -789,15 +792,15 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
} }
// Warm up the 'next_call' heuristic bits // Warm up the 'next_call' heuristic bits
needed_for_next_call(_nodes[0], next_call, cfg); needed_for_next_call(block, block->head(), next_call);
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (trace_opto_pipelining()) {
for (uint j=0; j<_nodes.size(); j++) { for (uint j=0; j< block->number_of_nodes(); j++) {
Node *n = _nodes[j]; Node *n = block->get_node(j);
int idx = n->_idx; int idx = n->_idx;
tty->print("# ready cnt:%3d ", ready_cnt.at(idx)); tty->print("# ready cnt:%3d ", ready_cnt.at(idx));
tty->print("latency:%3d ", cfg->get_latency_for_node(n)); tty->print("latency:%3d ", get_latency_for_node(n));
tty->print("%4d: %s\n", idx, n->Name()); tty->print("%4d: %s\n", idx, n->Name());
} }
} }
@ -808,7 +811,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
while( worklist.size() ) { // Worklist is not ready while( worklist.size() ) { // Worklist is not ready
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (trace_opto_pipelining()) {
tty->print("# ready list:"); tty->print("# ready list:");
for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
Node *n = worklist[i]; // Get Node on worklist Node *n = worklist[i]; // Get Node on worklist
@ -819,13 +822,13 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif #endif
// Select and pop a ready guy from worklist // Select and pop a ready guy from worklist
Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt); Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
_nodes.map(phi_cnt++,n); // Schedule him next block->map_node(n, phi_cnt++); // Schedule him next
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (trace_opto_pipelining()) {
tty->print("# select %d: %s", n->_idx, n->Name()); tty->print("# select %d: %s", n->_idx, n->Name());
tty->print(", latency:%d", cfg->get_latency_for_node(n)); tty->print(", latency:%d", get_latency_for_node(n));
n->dump(); n->dump();
if (Verbose) { if (Verbose) {
tty->print("# ready list:"); tty->print("# ready list:");
@ -840,26 +843,26 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif #endif
if( n->is_MachCall() ) { if( n->is_MachCall() ) {
MachCallNode *mcall = n->as_MachCall(); MachCallNode *mcall = n->as_MachCall();
phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call); phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
continue; continue;
} }
if (n->is_Mach() && n->as_Mach()->has_call()) { if (n->is_Mach() && n->as_Mach()->has_call()) {
RegMask regs; RegMask regs;
regs.Insert(matcher.c_frame_pointer()); regs.Insert(_matcher.c_frame_pointer());
regs.OR(n->out_RegMask()); regs.OR(n->out_RegMask());
MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj ); MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
cfg->map_node_to_block(proj, this); map_node_to_block(proj, block);
_nodes.insert(phi_cnt++, proj); block->insert_node(proj, phi_cnt++);
add_call_kills(proj, regs, matcher._c_reg_save_policy, false); add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
} }
// Children are now all ready // Children are now all ready
for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) { for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
Node* m = n->fast_out(i5); // Get user Node* m = n->fast_out(i5); // Get user
if (cfg->get_block_for_node(m) != this) { if (get_block_for_node(m) != block) {
continue; continue;
} }
if( m->is_Phi() ) continue; if( m->is_Phi() ) continue;
@ -874,9 +877,8 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
} }
} }
if( phi_cnt != end_idx() ) { if( phi_cnt != block->end_idx() ) {
// did not schedule all. Retry, Bailout, or Die // did not schedule all. Retry, Bailout, or Die
Compile* C = matcher.C;
if (C->subsume_loads() == true && !C->failing()) { if (C->subsume_loads() == true && !C->failing()) {
// Retry with subsume_loads == false // Retry with subsume_loads == false
// If this is the first failure, the sentinel string will "stick" // If this is the first failure, the sentinel string will "stick"
@ -888,12 +890,12 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
} }
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (trace_opto_pipelining()) {
tty->print_cr("#"); tty->print_cr("#");
tty->print_cr("# after schedule_local"); tty->print_cr("# after schedule_local");
for (uint i = 0;i < _nodes.size();i++) { for (uint i = 0;i < block->number_of_nodes();i++) {
tty->print("# "); tty->print("# ");
_nodes[i]->fast_dump(); block->get_node(i)->fast_dump();
} }
tty->cr(); tty->cr();
} }
@ -919,7 +921,7 @@ static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def
} }
//------------------------------catch_cleanup_find_cloned_def------------------ //------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) { Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
assert( use_blk != def_blk, "Inter-block cleanup only"); assert( use_blk != def_blk, "Inter-block cleanup only");
// The use is some block below the Catch. Find and return the clone of the def // The use is some block below the Catch. Find and return the clone of the def
@ -945,14 +947,14 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// PhiNode, the PhiNode uses from the def and IT's uses need fixup. // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
Node_Array inputs = new Node_List(Thread::current()->resource_area()); Node_Array inputs = new Node_List(Thread::current()->resource_area());
for(uint k = 1; k < use_blk->num_preds(); k++) { for(uint k = 1; k < use_blk->num_preds(); k++) {
Block* block = cfg->get_block_for_node(use_blk->pred(k)); Block* block = get_block_for_node(use_blk->pred(k));
inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx)); inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
} }
// Check to see if the use_blk already has an identical phi inserted. // Check to see if the use_blk already has an identical phi inserted.
// If it exists, it will be at the first position since all uses of a // If it exists, it will be at the first position since all uses of a
// def are processed together. // def are processed together.
Node *phi = use_blk->_nodes[1]; Node *phi = use_blk->get_node(1);
if( phi->is_Phi() ) { if( phi->is_Phi() ) {
fixup = phi; fixup = phi;
for (uint k = 1; k < use_blk->num_preds(); k++) { for (uint k = 1; k < use_blk->num_preds(); k++) {
@ -967,8 +969,8 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// If an existing PhiNode was not found, make a new one. // If an existing PhiNode was not found, make a new one.
if (fixup == NULL) { if (fixup == NULL) {
Node *new_phi = PhiNode::make(use_blk->head(), def); Node *new_phi = PhiNode::make(use_blk->head(), def);
use_blk->_nodes.insert(1, new_phi); use_blk->insert_node(new_phi, 1);
cfg->map_node_to_block(new_phi, use_blk); map_node_to_block(new_phi, use_blk);
for (uint k = 1; k < use_blk->num_preds(); k++) { for (uint k = 1; k < use_blk->num_preds(); k++) {
new_phi->set_req(k, inputs[k]); new_phi->set_req(k, inputs[k]);
} }
@ -977,7 +979,7 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
} else { } else {
// Found the use just below the Catch. Make it use the clone. // Found the use just below the Catch. Make it use the clone.
fixup = use_blk->_nodes[n_clone_idx]; fixup = use_blk->get_node(n_clone_idx);
} }
return fixup; return fixup;
@ -997,36 +999,36 @@ static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg,
for( uint k = 0; k < blk->_num_succs; k++ ) { for( uint k = 0; k < blk->_num_succs; k++ ) {
// Get clone in each successor block // Get clone in each successor block
Block *sb = blk->_succs[k]; Block *sb = blk->_succs[k];
Node *clone = sb->_nodes[offset_idx+1]; Node *clone = sb->get_node(offset_idx+1);
assert( clone->Opcode() == use->Opcode(), "" ); assert( clone->Opcode() == use->Opcode(), "" );
// Make use-clone reference the def-clone // Make use-clone reference the def-clone
catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]); catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
} }
} }
//------------------------------catch_cleanup_inter_block--------------------- //------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def". The use is in a different // Fix all input edges in use that reference "def". The use is in a different
// block than the def. // block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) { void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
if( !use_blk ) return; // Can happen if the use is a precedence edge if( !use_blk ) return; // Can happen if the use is a precedence edge
Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx); Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
catch_cleanup_fix_all_inputs(use, def, new_def); catch_cleanup_fix_all_inputs(use, def, new_def);
} }
//------------------------------call_catch_cleanup----------------------------- //------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode, // If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch. // clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) { void PhaseCFG::call_catch_cleanup(Block* block) {
// End of region to clone // End of region to clone
uint end = end_idx(); uint end = block->end_idx();
if( !_nodes[end]->is_Catch() ) return; if( !block->get_node(end)->is_Catch() ) return;
// Start of region to clone // Start of region to clone
uint beg = end; uint beg = end;
while(!_nodes[beg-1]->is_MachProj() || while(!block->get_node(beg-1)->is_MachProj() ||
!_nodes[beg-1]->in(0)->is_MachCall() ) { !block->get_node(beg-1)->in(0)->is_MachCall() ) {
beg--; beg--;
assert(beg > 0,"Catch cleanup walking beyond block boundary"); assert(beg > 0,"Catch cleanup walking beyond block boundary");
} }
@ -1035,15 +1037,15 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Clone along all Catch output paths. Clone area between the 'beg' and // Clone along all Catch output paths. Clone area between the 'beg' and
// 'end' indices. // 'end' indices.
for( uint i = 0; i < _num_succs; i++ ) { for( uint i = 0; i < block->_num_succs; i++ ) {
Block *sb = _succs[i]; Block *sb = block->_succs[i];
// Clone the entire area; ignoring the edge fixup for now. // Clone the entire area; ignoring the edge fixup for now.
for( uint j = end; j > beg; j-- ) { for( uint j = end; j > beg; j-- ) {
// It is safe here to clone a node with anti_dependence // It is safe here to clone a node with anti_dependence
// since clones dominate on each path. // since clones dominate on each path.
Node *clone = _nodes[j-1]->clone(); Node *clone = block->get_node(j-1)->clone();
sb->_nodes.insert( 1, clone ); sb->insert_node(clone, 1);
cfg->map_node_to_block(clone, sb); map_node_to_block(clone, sb);
} }
} }
@ -1051,7 +1053,7 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Fixup edges. Check the def-use info per cloned Node // Fixup edges. Check the def-use info per cloned Node
for(uint i2 = beg; i2 < end; i2++ ) { for(uint i2 = beg; i2 < end; i2++ ) {
uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
Node *n = _nodes[i2]; // Node that got cloned Node *n = block->get_node(i2); // Node that got cloned
// Need DU safe iterator because of edge manipulation in calls. // Need DU safe iterator because of edge manipulation in calls.
Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area()); Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) { for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
@ -1060,19 +1062,19 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
uint max = out->size(); uint max = out->size();
for (uint j = 0; j < max; j++) {// For all users for (uint j = 0; j < max; j++) {// For all users
Node *use = out->pop(); Node *use = out->pop();
Block *buse = cfg->get_block_for_node(use); Block *buse = get_block_for_node(use);
if( use->is_Phi() ) { if( use->is_Phi() ) {
for( uint k = 1; k < use->req(); k++ ) for( uint k = 1; k < use->req(); k++ )
if( use->in(k) == n ) { if( use->in(k) == n ) {
Block* block = cfg->get_block_for_node(buse->pred(k)); Block* b = get_block_for_node(buse->pred(k));
Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx); Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
use->set_req(k, fixup); use->set_req(k, fixup);
} }
} else { } else {
if (this == buse) { if (block == buse) {
catch_cleanup_intra_block(use, n, this, beg, n_clone_idx); catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
} else { } else {
catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx); catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
} }
} }
} // End for all users } // End for all users
@ -1081,30 +1083,30 @@ void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// Remove the now-dead cloned ops // Remove the now-dead cloned ops
for(uint i3 = beg; i3 < end; i3++ ) { for(uint i3 = beg; i3 < end; i3++ ) {
_nodes[beg]->disconnect_inputs(NULL, C); block->get_node(beg)->disconnect_inputs(NULL, C);
_nodes.remove(beg); block->remove_node(beg);
} }
// If the successor blocks have a CreateEx node, move it back to the top // If the successor blocks have a CreateEx node, move it back to the top
for(uint i4 = 0; i4 < _num_succs; i4++ ) { for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
Block *sb = _succs[i4]; Block *sb = block->_succs[i4];
uint new_cnt = end - beg; uint new_cnt = end - beg;
// Remove any newly created, but dead, nodes. // Remove any newly created, but dead, nodes.
for( uint j = new_cnt; j > 0; j-- ) { for( uint j = new_cnt; j > 0; j-- ) {
Node *n = sb->_nodes[j]; Node *n = sb->get_node(j);
if (n->outcnt() == 0 && if (n->outcnt() == 0 &&
(!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){ (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
n->disconnect_inputs(NULL, C); n->disconnect_inputs(NULL, C);
sb->_nodes.remove(j); sb->remove_node(j);
new_cnt--; new_cnt--;
} }
} }
// If any newly created nodes remain, move the CreateEx node to the top // If any newly created nodes remain, move the CreateEx node to the top
if (new_cnt > 0) { if (new_cnt > 0) {
Node *cex = sb->_nodes[1+new_cnt]; Node *cex = sb->get_node(1+new_cnt);
if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) { if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
sb->_nodes.remove(1+new_cnt); sb->remove_node(1+new_cnt);
sb->_nodes.insert(1,cex); sb->insert_node(cex, 1);
} }
} }
} }
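
The hunks above all follow one refactoring pattern: scheduling logic that lived on Block and reached back into the CFG through a cfg parameter becomes a PhaseCFG method that takes the block as an explicit argument. A minimal sketch of the pattern, assuming invented toy types rather than the real opto classes:

// Toy stand-ins (not HotSpot code) for the Block/PhaseCFG split above.
#include <cstdio>
#include <vector>

struct Block {
  std::vector<int> nodes;  // placeholder for the real node list
  unsigned number_of_nodes() const { return (unsigned)nodes.size(); }
  int get_node(unsigned i) const { return nodes[i]; }
};

struct PhaseCFG {
  // Before: bool Block::schedule_local(PhaseCFG* cfg, ...) mixed 'this'
  // (the block) with 'cfg' (the phase). After: the phase owns the
  // operation and the block is an ordinary argument.
  bool schedule_local(Block* block) {
    if (block->number_of_nodes() == 1) {
      return true;  // mirrors the "RootNode is already sorted" early-out
    }
    for (unsigned i = 0; i < block->number_of_nodes(); i++) {
      std::printf("node %d\n", block->get_node(i));
    }
    return true;
  }
};

int main() {
  Block b;
  b.nodes.push_back(1);
  b.nodes.push_back(2);
  PhaseCFG cfg;
  return cfg.schedule_local(&b) ? 0 : 1;
}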

View file

@@ -1280,6 +1280,11 @@ Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_ar
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    target = cast_array_to_stable(target, target_type);
+  }
   IdealKit kit(this, false, true);
 #define __ kit.
   Node* zero = __ ConI(0);
@@ -2756,10 +2761,28 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
       newval = _gvn.makecon(TypePtr::NULL_PTR);
     // Reference stores need a store barrier.
-    pre_barrier(true /* do_load*/,
-                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
-                NULL /* pre_val*/,
-                T_OBJECT);
+    if (kind == LS_xchg) {
+      // If pre-barrier must execute before the oop store, old value will require do_load here.
+      if (!can_move_pre_barrier()) {
+        pre_barrier(true /* do_load*/,
+                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
+                    NULL /* pre_val*/,
+                    T_OBJECT);
+      } // Else move pre_barrier to use load_store value, see below.
+    } else if (kind == LS_cmpxchg) {
+      // Same as for newval above:
+      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
+        oldval = _gvn.makecon(TypePtr::NULL_PTR);
+      }
+      // The only known value which might get overwritten is oldval.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  oldval /* pre_val */,
+                  T_OBJECT);
+    } else {
+      ShouldNotReachHere();
+    }
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
@@ -2795,16 +2818,27 @@ bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind
   Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   set_memory(proj, alias_idx);
+  if (type == T_OBJECT && kind == LS_xchg) {
+#ifdef _LP64
+    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
+    }
+#endif
+    if (can_move_pre_barrier()) {
+      // Don't need to load pre_val. The old value is returned by load_store.
+      // The pre_barrier can execute after the xchg as long as no safepoint
+      // gets inserted between them.
+      pre_barrier(false /* do_load */,
+                  control(), NULL, NULL, max_juint, NULL, NULL,
+                  load_store /* pre_val */,
+                  T_OBJECT);
+    }
+  }
   // Add the trailing membar surrounding the access
   insert_mem_bar(Op_MemBarCPUOrder);
   insert_mem_bar(Op_MemBarAcquire);
-#ifdef _LP64
-  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
-    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
-  }
-#endif
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   set_result(load_store);
   return true;
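
The second hunk above lets the SATB pre-barrier move after an atomic exchange: the exchange already returns the old value, which is exactly the pre_val the barrier needs, so no separate load of the old value is required beforehand. A toy illustration of that ordering, assuming std::atomic stand-ins rather than the JVM's real barrier machinery:

// Sketch only: pre_barrier() here is a hypothetical stand-in for the
// collector's SATB enqueue, not the HotSpot function of the same name.
#include <atomic>
#include <cstdio>

static void pre_barrier(void* pre_val) {
  // A SATB collector would record the overwritten reference for marking.
  std::printf("enqueue old value %p\n", pre_val);
}

void* atomic_xchg_with_barrier(std::atomic<void*>& field, void* newval) {
  void* old = field.exchange(newval);  // the xchg itself yields the old value
  pre_barrier(old);                    // barrier runs after, reusing that result
  return old;
}

int main() {
  std::atomic<void*> f(nullptr);
  int x = 0;
  atomic_xchg_with_barrier(f, &x);
  return 0;
}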

View file

@@ -85,8 +85,8 @@ void PhaseLive::compute(uint maxlrg) {
     IndexSet* def = &_defs[block->_pre_order-1];
     DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
     uint i;
-    for (i = block->_nodes.size(); i > 1; i--) {
-      Node* n = block->_nodes[i-1];
+    for (i = block->number_of_nodes(); i > 1; i--) {
+      Node* n = block->get_node(i-1);
       if (n->is_Phi()) {
         break;
       }
@@ -112,7 +112,7 @@ void PhaseLive::compute(uint maxlrg) {
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->_nodes[k - 1]->_idx];
+      uint r = _names[block->get_node(k - 1)->_idx];
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@ void PhaseLive::compute(uint maxlrg) {
         // PhiNode uses go in the live-out set of prior blocks.
         for (uint k = i; k > 0; k--) {
-          add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
+          add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
         }
       }
       freeset(block);
@@ -254,10 +254,10 @@ void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
 void PhaseLive::dump( const Block *b ) const {
   tty->print("Block %d: ",b->_pre_order);
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
-  uint cnt = b->_nodes.size();
+  uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
-    b->_nodes[i]->dump();
+    tty->print("L%d/", _names[b->get_node(i)->_idx] );
+    b->get_node(i)->dump();
   }
   tty->print("\n");
 }
@@ -269,7 +269,7 @@ void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
   for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
     Block* block = _cfg.get_block(i);
     for (uint j = block->end_idx() + 1; j > 1; j--) {
-      Node* n = block->_nodes[j-1];
+      Node* n = block->get_node(j-1);
       if (n->is_Phi()) {
         break;
       }
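
These hunks only swap direct _nodes accesses for the new Block accessors; the pass they sit in is a standard backward local-liveness scan. For orientation, a self-contained sketch of that scan over invented instruction records (not the real PhaseLive types):

// Hedged sketch: a value used before it is redefined joins the block's
// upward-exposed 'use' set, and every definition joins 'def', walking
// the block bottom-up as PhaseLive::compute does.
#include <cstdio>
#include <set>
#include <vector>

struct Insn { int def; std::vector<int> uses; };  // toy instruction record

void local_use_def(const std::vector<Insn>& block,
                   std::set<int>& use, std::set<int>& def) {
  for (std::vector<Insn>::const_reverse_iterator it = block.rbegin();
       it != block.rend(); ++it) {          // bottom-up over the block
    def.insert(it->def);
    use.erase(it->def);                     // killed by this definition
    for (size_t u = 0; u < it->uses.size(); u++) {
      use.insert(it->uses[u]);              // upward-exposed use
    }
  }
}

int main() {
  std::vector<Insn> block;
  Insn a = {1, {0}};       // v1 = f(v0)
  Insn b = {2, {1, 0}};    // v2 = g(v1, v0)
  block.push_back(a);
  block.push_back(b);
  std::set<int> use, def;
  local_use_def(block, use, def);
  std::printf("use=%zu def=%zu\n", use.size(), def.size());
  return 0;
}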

View file

@@ -72,6 +72,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
   int jvms_adj = new_dbg_start - old_dbg_start;
   assert (new_dbg_start == newcall->req(), "argument count mismatch");
+  // SafePointScalarObject node could be referenced several times in debug info.
+  // Use Dict to record cloned nodes.
   Dict* sosn_map = new Dict(cmpkey,hashkey);
   for (uint i = old_dbg_start; i < oldcall->req(); i++) {
     Node* old_in = oldcall->in(i);
@@ -79,8 +81,8 @@ void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcal
     if (old_in != NULL && old_in->is_SafePointScalarObject()) {
       SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
       uint old_unique = C->unique();
-      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
-      if (old_unique != C->unique()) {
+      Node* new_in = old_sosn->clone(sosn_map);
+      if (old_unique != C->unique()) { // New node?
         new_in->set_req(0, C->root()); // reset control edge
         new_in = transform_later(new_in); // Register new node.
       }
@@ -725,7 +727,11 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
   while (safepoints.length() > 0) {
     SafePointNode* sfpt = safepoints.pop();
     Node* mem = sfpt->memory();
-    uint first_ind = sfpt->req();
+    assert(sfpt->jvms() != NULL, "missed JVMS");
+    // Fields of scalar objs are referenced only at the end
+    // of regular debuginfo at the last (youngest) JVMS.
+    // Record relative start index.
+    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
     SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
 #ifdef ASSERT
                                                  alloc,
@@ -799,7 +805,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
     for (int i = start; i < end; i++) {
       if (sfpt_done->in(i)->is_SafePointScalarObject()) {
         SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
-        if (scobj->first_index() == sfpt_done->req() &&
+        if (scobj->first_index(jvms) == sfpt_done->req() &&
             scobj->n_fields() == (uint)nfields) {
           assert(scobj->alloc() == alloc, "sanity");
           sfpt_done->set_req(i, res);
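
The key change above records a scalar object's field start relative to the JVMS scalar offset (scloff) rather than as an absolute input index, so the recorded position stays valid when earlier debug-info inputs shift. A small sketch of that idea with invented toy types:

// Toy model (not the real JVMState/SafePointScalarObjectNode API):
// store the index relative to scloff, recompute the absolute index on demand.
#include <cassert>

struct ToyJvms { unsigned scloff; };        // hypothetical stand-in for JVMState

struct ToyScalarObject {
  unsigned _first_index;                    // relative, as in the diff
  unsigned first_index(const ToyJvms& jvms) const {
    return jvms.scloff + _first_index;      // absolute index on demand
  }
};

int main() {
  ToyJvms jvms = {10};
  ToyScalarObject sobj = {3};    // fields start 3 slots past scloff
  assert(sobj.first_index(jvms) == 13);
  jvms.scloff = 12;              // debug info grew; relative index still works
  assert(sobj.first_index(jvms) == 15);
  return 0;
}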

View file

@@ -962,6 +962,19 @@ uint LoadNode::hash() const {
   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 }
+static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+    bool is_stable_ary = FoldStableValues &&
+                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+                         tp->isa_aryptr()->is_stable();
+    return (eliminate_boxing && non_volatile) || is_stable_ary;
+  }
+  return false;
+}
 //---------------------------can_see_stored_value------------------------------
 // This routine exists to make sure this set of tests is done the same
 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
@@ -976,11 +989,9 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
   Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
   // This is more general than load from boxing objects.
-  if (phase->C->eliminate_boxing() && (atp != NULL) &&
-      (atp->index() >= Compile::AliasIdxRaw) &&
-      (atp->field() != NULL) && !atp->field()->is_volatile()) {
+  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
     uint alias_idx = atp->index();
-    bool final = atp->field()->is_final();
+    bool final = !atp->is_rewritable();
     Node* result = NULL;
     Node* current = st;
     // Skip through chains of MemBarNodes checking the MergeMems for
@@ -1015,7 +1026,6 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
     }
   }
-
   // Loop around twice in the case Load -> Initialize -> Store.
   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
   for (int trip = 0; trip <= 1; trip++) {
@@ -1577,6 +1587,40 @@ LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
   return NULL;
 }
+// Try to constant-fold a stable array element.
+static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+  assert(ary->is_stable(), "array should be stable");
+  if (ary->const_oop() != NULL) {
+    // Decode the results of GraphKit::array_element_address.
+    ciArray* aobj = ary->const_oop()->as_array();
+    ciConstant con = aobj->element_value_by_offset(off);
+    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+      const Type* con_type = Type::make_from_constant(con);
+      if (con_type != NULL) {
+        if (con_type->isa_aryptr()) {
+          // Join with the array element type, in case it is also stable.
+          int dim = ary->stable_dimension();
+          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+        }
+        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+          con_type = con_type->make_narrowoop();
+        }
+#ifndef PRODUCT
+        if (TraceIterativeGVN) {
+          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+          con_type->dump(); tty->cr();
+        }
+#endif //PRODUCT
+        return con_type;
+      }
+    }
+  }
+  return NULL;
+}
 //------------------------------Value-----------------------------------------
 const Type *LoadNode::Value( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
@@ -1591,8 +1635,31 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
   Compile* C = phase->C;
   // Try to guess loaded type from pointer type
-  if (tp->base() == Type::AryPtr) {
-    const Type *t = tp->is_aryptr()->elem();
+  if (tp->isa_aryptr()) {
+    const TypeAryPtr* ary = tp->is_aryptr();
+    const Type *t = ary->elem();
+    // Determine whether the reference is beyond the header or not, by comparing
+    // the offset against the offset of the start of the array's data.
+    // Different array types begin at slightly different offsets (12 vs. 16).
+    // We choose T_BYTE as an example base type that is least restrictive
+    // as to alignment, which will therefore produce the smallest
+    // possible base offset.
+    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
+    // Try to constant-fold a stable array element.
+    if (FoldStableValues && ary->is_stable()) {
+      // Make sure the reference is not into the header
+      if (off_beyond_header && off != Type::OffsetBot) {
+        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
+        if (con_type != NULL) {
+          return con_type;
+        }
+      }
+    }
     // Don't do this for integer types. There is only potential profit if
     // the element type t is lower than _type; that is, for int types, if _type is
     // more restrictive than t.  This only happens here if one is short and the other
@@ -1613,14 +1680,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
-      // Make sure the reference is not into the header, by comparing
-      // the offset against the offset of the start of the array's data.
-      // Different array types begin at slightly different offsets (12 vs. 16).
-      // We choose T_BYTE as an example base type that is least restrictive
-      // as to alignment, which will therefore produce the smallest
-      // possible base offset.
-      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
+      if (off_beyond_header) {  // is the offset beyond the header?
         const Type* jt = t->join(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
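
fold_stable_ary_elem above encodes the @Stable contract: an element of a constant, stable array may be folded to a constant only when it is non-null/non-zero, because a stable field promises at most one transition away from its default value. A toy model of that rule, assuming plain C++ types rather than the real ciArray/Type API:

// Hedged sketch, not the HotSpot implementation.
#include <cstdio>
#include <vector>

struct StableIntArray {
  bool is_constant;          // whole array object is a compile-time constant
  std::vector<int> elems;
};

// Returns true and sets 'out' when the load can be replaced by a constant.
bool fold_stable_elem(const StableIntArray& a, size_t idx, int& out) {
  if (!a.is_constant || idx >= a.elems.size()) return false;
  int v = a.elems[idx];
  if (v == 0) return false;  // default value: a later write is still allowed
  out = v;
  return true;
}

int main() {
  StableIntArray a;
  a.is_constant = true;
  a.elems.push_back(0);
  a.elems.push_back(42);
  int c = 0;
  std::printf("elem1 foldable=%d\n", fold_stable_elem(a, 1, c)); // folds to 42
  std::printf("elem0 foldable=%d\n", fold_stable_elem(a, 0, c)); // not folded
  return 0;
}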

View file

@@ -773,6 +773,21 @@ void Node::del_req( uint idx ) {
   _in[_cnt] = NULL; // NULL out emptied slot
 }
+//------------------------------del_req_ordered--------------------------------
+// Delete the required edge and compact the edge array with preserved order
+void Node::del_req_ordered( uint idx ) {
+  assert( idx < _cnt, "oob");
+  assert( !VerifyHashTableKeys || _hash_lock == 0,
+          "remove node from hash table before modifying it");
+  // First remove corresponding def-use edge
+  Node *n = in(idx);
+  if (n != NULL) n->del_out((Node *)this);
+  if (idx < _cnt - 1) { // Not last edge ?
+    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
+  }
+  _in[--_cnt] = NULL; // NULL out emptied slot
+}
 //------------------------------ins_req----------------------------------------
 // Insert a new required input at the end
 void Node::ins_req( uint idx, Node *n ) {
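
del_req_ordered differs from the existing del_req only in how it compacts the input array: del_req fills the hole with the last edge (cheap, but it reorders inputs), while del_req_ordered shifts the tail down to preserve input order. A vector-based sketch of the two strategies, assuming toy int edges in place of Node pointers:

// Sketch only: std::vector stands in for the Node::_in array.
#include <cassert>
#include <vector>

void del_req(std::vector<int>& in, size_t idx) {
  in[idx] = in.back();         // compact by moving the last edge into the hole
  in.pop_back();
}

void del_req_ordered(std::vector<int>& in, size_t idx) {
  in.erase(in.begin() + idx);  // shift the tail down, preserving order
}

int main() {
  std::vector<int> a, b;
  for (int i = 1; i <= 4; i++) { a.push_back(i); }
  b = a;
  del_req(a, 1);          // -> {1, 4, 3}: order changed
  del_req_ordered(b, 1);  // -> {1, 3, 4}: order kept
  assert(a[1] == 4 && b[1] == 3);
  return 0;
}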

View file

@@ -384,6 +384,7 @@ protected:
   void add_req( Node *n ); // Append a NEW required input
   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   void del_req( uint idx ); // Delete required edge & compact
+  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
   void ins_req( uint i, Node *n ); // Insert a NEW required input
   void set_req( uint i, Node *n ) {
     assert( is_not_dead(n), "can not use dead node");

View file

@ -57,7 +57,7 @@ extern int emit_deopt_handler(CodeBuffer &cbuf);
// Convert Nodes to instruction bits and pass off to the VM // Convert Nodes to instruction bits and pass off to the VM
void Compile::Output() { void Compile::Output() {
// RootNode goes // RootNode goes
assert( _cfg->get_root_block()->_nodes.size() == 0, "" ); assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
// The number of new nodes (mostly MachNop) is proportional to // The number of new nodes (mostly MachNop) is proportional to
// the number of java calls and inner loops which are aligned. // the number of java calls and inner loops which are aligned.
@ -70,11 +70,11 @@ void Compile::Output() {
Block *entry = _cfg->get_block(1); Block *entry = _cfg->get_block(1);
Block *broot = _cfg->get_root_block(); Block *broot = _cfg->get_root_block();
const StartNode *start = entry->_nodes[0]->as_Start(); const StartNode *start = entry->head()->as_Start();
// Replace StartNode with prolog // Replace StartNode with prolog
MachPrologNode *prolog = new (this) MachPrologNode(); MachPrologNode *prolog = new (this) MachPrologNode();
entry->_nodes.map( 0, prolog ); entry->map_node(prolog, 0);
_cfg->map_node_to_block(prolog, entry); _cfg->map_node_to_block(prolog, entry);
_cfg->unmap_node_from_block(start); // start is no longer in any block _cfg->unmap_node_from_block(start); // start is no longer in any block
@ -144,8 +144,8 @@ void Compile::Output() {
for (uint i = 0; i < _cfg->number_of_blocks(); i++) { for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
tty->print("\nBB#%03d:\n", i); tty->print("\nBB#%03d:\n", i);
Block* block = _cfg->get_block(i); Block* block = _cfg->get_block(i);
for (uint j = 0; j < block->_nodes.size(); j++) { for (uint j = 0; j < block->number_of_nodes(); j++) {
Node* n = block->_nodes[j]; Node* n = block->get_node(j);
OptoReg::Name reg = _regalloc->get_reg_first(n); OptoReg::Name reg = _regalloc->get_reg_first(n);
tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : ""); tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
n->dump(); n->dump();
@ -226,8 +226,8 @@ void Compile::Insert_zap_nodes() {
// Insert call to zap runtime stub before every node with an oop map // Insert call to zap runtime stub before every node with an oop map
for( uint i=0; i<_cfg->number_of_blocks(); i++ ) { for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
Block *b = _cfg->get_block(i); Block *b = _cfg->get_block(i);
for ( uint j = 0; j < b->_nodes.size(); ++j ) { for ( uint j = 0; j < b->number_of_nodes(); ++j ) {
Node *n = b->_nodes[j]; Node *n = b->get_node(j);
// Determining if we should insert a zap-a-lot node in output. // Determining if we should insert a zap-a-lot node in output.
// We do that for all nodes that has oopmap info, except for calls // We do that for all nodes that has oopmap info, except for calls
@ -256,7 +256,7 @@ void Compile::Insert_zap_nodes() {
} }
if (insert) { if (insert) {
Node *zap = call_zap_node(n->as_MachSafePoint(), i); Node *zap = call_zap_node(n->as_MachSafePoint(), i);
b->_nodes.insert( j, zap ); b->insert_node(zap, j);
_cfg->map_node_to_block(zap, b); _cfg->map_node_to_block(zap, b);
++j; ++j;
} }
@ -379,10 +379,10 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
DEBUG_ONLY( jmp_rule[i] = 0; ) DEBUG_ONLY( jmp_rule[i] = 0; )
// Sum all instruction sizes to compute block size // Sum all instruction sizes to compute block size
uint last_inst = block->_nodes.size(); uint last_inst = block->number_of_nodes();
uint blk_size = 0; uint blk_size = 0;
for (uint j = 0; j < last_inst; j++) { for (uint j = 0; j < last_inst; j++) {
Node* nj = block->_nodes[j]; Node* nj = block->get_node(j);
// Handle machine instruction nodes // Handle machine instruction nodes
if (nj->is_Mach()) { if (nj->is_Mach()) {
MachNode *mach = nj->as_Mach(); MachNode *mach = nj->as_Mach();
@ -477,18 +477,18 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
for (uint i = 0; i < nblocks; i++) { for (uint i = 0; i < nblocks; i++) {
Block* block = _cfg->get_block(i); Block* block = _cfg->get_block(i);
int idx = jmp_nidx[i]; int idx = jmp_nidx[i];
MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach(); MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
if (mach != NULL && mach->may_be_short_branch()) { if (mach != NULL && mach->may_be_short_branch()) {
#ifdef ASSERT #ifdef ASSERT
assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity"); assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
int j; int j;
// Find the branch; ignore trailing NOPs. // Find the branch; ignore trailing NOPs.
for (j = block->_nodes.size()-1; j>=0; j--) { for (j = block->number_of_nodes()-1; j>=0; j--) {
Node* n = block->_nodes[j]; Node* n = block->get_node(j);
if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
break; break;
} }
assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity"); assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
#endif #endif
int br_size = jmp_size[i]; int br_size = jmp_size[i];
int br_offs = blk_starts[i] + jmp_offset[i]; int br_offs = blk_starts[i] + jmp_offset[i];
@ -522,7 +522,7 @@ void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size
diff -= nop_size; diff -= nop_size;
} }
adjust_block_start += diff; adjust_block_start += diff;
block->_nodes.map(idx, replacement); block->map_node(replacement, idx);
mach->subsume_by(replacement, C); mach->subsume_by(replacement, C);
mach = replacement; mach = replacement;
progress = true; progress = true;
@ -639,7 +639,7 @@ void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
new ConstantOopWriteValue(cik->java_mirror()->constant_encoding())); new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv); Compile::set_sv_for_object_node(objs, sv);
uint first_ind = spobj->first_index(); uint first_ind = spobj->first_index(sfpt->jvms());
for (uint i = 0; i < spobj->n_fields(); i++) { for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfpt->in(first_ind+i); Node* fld_node = sfpt->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs); (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
@ -894,7 +894,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon); GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
// Loop over monitors and insert into array // Loop over monitors and insert into array
for(idx = 0; idx < num_mon; idx++) { for (idx = 0; idx < num_mon; idx++) {
// Grab the node that defines this monitor // Grab the node that defines this monitor
Node* box_node = sfn->monitor_box(jvms, idx); Node* box_node = sfn->monitor_box(jvms, idx);
Node* obj_node = sfn->monitor_obj(jvms, idx); Node* obj_node = sfn->monitor_obj(jvms, idx);
@ -902,11 +902,11 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
// Create ScopeValue for object // Create ScopeValue for object
ScopeValue *scval = NULL; ScopeValue *scval = NULL;
if( obj_node->is_SafePointScalarObject() ) { if (obj_node->is_SafePointScalarObject()) {
SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject(); SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
scval = Compile::sv_for_node_id(objs, spobj->_idx); scval = Compile::sv_for_node_id(objs, spobj->_idx);
if (scval == NULL) { if (scval == NULL) {
const Type *t = obj_node->bottom_type(); const Type *t = spobj->bottom_type();
ciKlass* cik = t->is_oopptr()->klass(); ciKlass* cik = t->is_oopptr()->klass();
assert(cik->is_instance_klass() || assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation."); cik->is_array_klass(), "Not supported allocation.");
@ -914,14 +914,14 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
new ConstantOopWriteValue(cik->java_mirror()->constant_encoding())); new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
Compile::set_sv_for_object_node(objs, sv); Compile::set_sv_for_object_node(objs, sv);
uint first_ind = spobj->first_index(); uint first_ind = spobj->first_index(youngest_jvms);
for (uint i = 0; i < spobj->n_fields(); i++) { for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfn->in(first_ind+i); Node* fld_node = sfn->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs); (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
} }
scval = sv; scval = sv;
} }
} else if( !obj_node->is_Con() ) { } else if (!obj_node->is_Con()) {
OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node); OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
if( obj_node->bottom_type()->base() == Type::NarrowOop ) { if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop ); scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
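Two things change in these Process_OopMap_Node hunks: the scalar-replaced object's type is now read off the SafePointScalarObjectNode itself rather than off obj_node, and first_index() now takes the JVMS it is resolved against instead of answering an absolute input number. A toy sketch of the second point, with assumed names and layout (the real JVMState bookkeeping is richer than this):

```cpp
#include <cstdio>
#include <vector>

// Toy sketch, not the HotSpot sources: a safepoint carries a stack of
// inlined JVM states, and each state owns a window of the node's inputs.
// Making the scalar object's first field index *relative* to a JVMState
// keeps it valid whichever caller/callee window it is resolved in.
struct JVMState {
  int debug_start;              // first input index owned by this state
};

struct ScalarObject {
  int rel_first_index;          // field index relative to a JVMState window
  int n_fields;
  int first_index(const JVMState* jvms) const {
    return jvms->debug_start + rel_first_index;   // rebase per caller
  }
};

int main() {
  std::vector<int> inputs = {0, 0, 0, 41, 42, 43}; // fake safepoint inputs
  JVMState youngest = {3};                         // its window starts at 3
  ScalarObject spobj = {0, 3};
  for (int i = 0; i < spobj.n_fields; i++)
    std::printf("field %d = %d\n", i, inputs[spobj.first_index(&youngest) + i]);
  return 0;
}
```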
@@ -1088,8 +1088,8 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
for (uint i = 0; i < _cfg->number_of_blocks(); i++) { for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
Block* b = _cfg->get_block(i); Block* b = _cfg->get_block(i);
for (uint j = 0; j < b->_nodes.size(); j++) { for (uint j = 0; j < b->number_of_nodes(); j++) {
Node* n = b->_nodes[j]; Node* n = b->get_node(j);
// If the node is a MachConstantNode evaluate the constant // If the node is a MachConstantNode evaluate the constant
// value section. // value section.
@@ -1247,14 +1247,14 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Define the label at the beginning of the basic block // Define the label at the beginning of the basic block
MacroAssembler(cb).bind(blk_labels[block->_pre_order]); MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
uint last_inst = block->_nodes.size(); uint last_inst = block->number_of_nodes();
// Emit block normally, except for last instruction. // Emit block normally, except for last instruction.
// Emit means "dump code bits into code buffer". // Emit means "dump code bits into code buffer".
for (uint j = 0; j<last_inst; j++) { for (uint j = 0; j<last_inst; j++) {
// Get the node // Get the node
Node* n = block->_nodes[j]; Node* n = block->get_node(j);
// See if delay slots are supported // See if delay slots are supported
if (valid_bundle_info(n) && if (valid_bundle_info(n) &&
@@ -1308,7 +1308,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
assert((padding % nop_size) == 0, "padding is not a multiple of NOP size"); assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
int nops_cnt = padding / nop_size; int nops_cnt = padding / nop_size;
MachNode *nop = new (this) MachNopNode(nops_cnt); MachNode *nop = new (this) MachNopNode(nops_cnt);
block->_nodes.insert(j++, nop); block->insert_node(nop, j++);
last_inst++; last_inst++;
_cfg->map_node_to_block(nop, block); _cfg->map_node_to_block(nop, block);
nop->emit(*cb, _regalloc); nop->emit(*cb, _regalloc);
@@ -1394,7 +1394,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Insert padding between avoid_back_to_back branches. // Insert padding between avoid_back_to_back branches.
if (needs_padding && replacement->avoid_back_to_back()) { if (needs_padding && replacement->avoid_back_to_back()) {
MachNode *nop = new (this) MachNopNode(); MachNode *nop = new (this) MachNopNode();
block->_nodes.insert(j++, nop); block->insert_node(nop, j++);
_cfg->map_node_to_block(nop, block); _cfg->map_node_to_block(nop, block);
last_inst++; last_inst++;
nop->emit(*cb, _regalloc); nop->emit(*cb, _regalloc);
@@ -1407,7 +1407,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
jmp_size[i] = new_size; jmp_size[i] = new_size;
jmp_rule[i] = mach->rule(); jmp_rule[i] = mach->rule();
#endif #endif
block->_nodes.map(j, replacement); block->map_node(replacement, j);
mach->subsume_by(replacement, C); mach->subsume_by(replacement, C);
n = replacement; n = replacement;
mach = replacement; mach = replacement;
@@ -1438,7 +1438,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
count++; count++;
uint i4; uint i4;
for (i4 = 0; i4 < last_inst; ++i4) { for (i4 = 0; i4 < last_inst; ++i4) {
if (block->_nodes[i4] == oop_store) { if (block->get_node(i4) == oop_store) {
break; break;
} }
} }
@@ -1548,7 +1548,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
int padding = nb->alignment_padding(current_offset); int padding = nb->alignment_padding(current_offset);
if( padding > 0 ) { if( padding > 0 ) {
MachNode *nop = new (this) MachNopNode(padding / nop_size); MachNode *nop = new (this) MachNopNode(padding / nop_size);
block->_nodes.insert(block->_nodes.size(), nop); block->insert_node(nop, block->number_of_nodes());
_cfg->map_node_to_block(nop, block); _cfg->map_node_to_block(nop, block);
nop->emit(*cb, _regalloc); nop->emit(*cb, _regalloc);
current_offset = cb->insts_size(); current_offset = cb->insts_size();
@@ -1655,8 +1655,8 @@ void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_start
int j; int j;
// Find the branch; ignore trailing NOPs. // Find the branch; ignore trailing NOPs.
for (j = block->_nodes.size() - 1; j >= 0; j--) { for (j = block->number_of_nodes() - 1; j >= 0; j--) {
n = block->_nodes[j]; n = block->get_node(j);
if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) { if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
break; break;
} }
@@ -1675,8 +1675,8 @@ void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_start
uint call_return = call_returns[block->_pre_order]; uint call_return = call_returns[block->_pre_order];
#ifdef ASSERT #ifdef ASSERT
assert( call_return > 0, "no call seen for this basic block" ); assert( call_return > 0, "no call seen for this basic block" );
while (block->_nodes[--j]->is_MachProj()) ; while (block->get_node(--j)->is_MachProj()) ;
assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call"); assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
#endif #endif
// last instruction is a CatchNode, find its CatchProjNodes            // last instruction is a CatchNode, find its CatchProjNodes
int nof_succs = block->_num_succs; int nof_succs = block->_num_succs;
@@ -1782,7 +1782,7 @@ Scheduling::Scheduling(Arena *arena, Compile &compile)
// Get the last node // Get the last node
Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1); Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
_next_node = block->_nodes[block->_nodes.size() - 1]; _next_node = block->get_node(block->number_of_nodes() - 1);
} }
#ifndef PRODUCT #ifndef PRODUCT
@@ -1875,7 +1875,7 @@ void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
// Used to allow latency 0 to force an instruction to the beginning // Used to allow latency 0 to force an instruction to the beginning
// of the bb // of the bb
uint latency = 1; uint latency = 1;
Node *use = bb->_nodes[j]; Node *use = bb->get_node(j);
uint nlen = use->len(); uint nlen = use->len();
// Walk over all the inputs // Walk over all the inputs
@@ -2286,7 +2286,7 @@ void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
(OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) { (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
// Push any trailing projections // Push any trailing projections
if( bb->_nodes[bb->_nodes.size()-1] != n ) { if( bb->get_node(bb->number_of_nodes()-1) != n ) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *foi = n->fast_out(i); Node *foi = n->fast_out(i);
if( foi->is_Proj() ) if( foi->is_Proj() )
@@ -2329,21 +2329,21 @@ void Scheduling::ComputeUseCount(const Block *bb) {
_unconditional_delay_slot = NULL; _unconditional_delay_slot = NULL;
#ifdef ASSERT #ifdef ASSERT
for( uint i=0; i < bb->_nodes.size(); i++ ) for( uint i=0; i < bb->number_of_nodes(); i++ )
assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" ); assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
#endif #endif
// Force the _uses count to never go to zero for unschedulable pieces            // Force the _uses count to never go to zero for unschedulable pieces
// of the block // of the block
for( uint k = 0; k < _bb_start; k++ ) for( uint k = 0; k < _bb_start; k++ )
_uses[bb->_nodes[k]->_idx] = 1; _uses[bb->get_node(k)->_idx] = 1;
for( uint l = _bb_end; l < bb->_nodes.size(); l++ ) for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
_uses[bb->_nodes[l]->_idx] = 1; _uses[bb->get_node(l)->_idx] = 1;
// Iterate backwards over the instructions in the block. Don't count the // Iterate backwards over the instructions in the block. Don't count the
// branch projections at end or the block header instructions. // branch projections at end or the block header instructions.
for( uint j = _bb_end-1; j >= _bb_start; j-- ) { for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
Node *n = bb->_nodes[j]; Node *n = bb->get_node(j);
if( n->is_Proj() ) continue; // Projections handled another way if( n->is_Proj() ) continue; // Projections handled another way
// Account for all uses // Account for all uses
@@ -2398,8 +2398,8 @@ void Scheduling::DoScheduling() {
#ifndef PRODUCT #ifndef PRODUCT
if (_cfg->C->trace_opto_output()) { if (_cfg->C->trace_opto_output()) {
tty->print("# Schedule BB#%03d (initial)\n", i); tty->print("# Schedule BB#%03d (initial)\n", i);
for (uint j = 0; j < bb->_nodes.size(); j++) { for (uint j = 0; j < bb->number_of_nodes(); j++) {
bb->_nodes[j]->dump(); bb->get_node(j)->dump();
} }
} }
#endif #endif
@@ -2426,10 +2426,10 @@ void Scheduling::DoScheduling() {
} }
// Leave untouched the starting instruction, any Phis, a CreateEx node // Leave untouched the starting instruction, any Phis, a CreateEx node
// or Top. bb->_nodes[_bb_start] is the first schedulable instruction. // or Top. bb->get_node(_bb_start) is the first schedulable instruction.
_bb_end = bb->_nodes.size()-1; _bb_end = bb->number_of_nodes()-1;
for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) { for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
Node *n = bb->_nodes[_bb_start]; Node *n = bb->get_node(_bb_start);
// Things not matched, like Phinodes and ProjNodes don't get scheduled. // Things not matched, like Phinodes and ProjNodes don't get scheduled.
// Also, MachIdealNodes do not get scheduled // Also, MachIdealNodes do not get scheduled
if( !n->is_Mach() ) continue; // Skip non-machine nodes if( !n->is_Mach() ) continue; // Skip non-machine nodes
@@ -2449,19 +2449,19 @@ void Scheduling::DoScheduling() {
// in the block), because they have delay slots we can fill. Calls all // in the block), because they have delay slots we can fill. Calls all
// have their delay slots filled in the template expansions, so we don't // have their delay slots filled in the template expansions, so we don't
// bother scheduling them. // bother scheduling them.
Node *last = bb->_nodes[_bb_end]; Node *last = bb->get_node(_bb_end);
// Ignore trailing NOPs. // Ignore trailing NOPs.
while (_bb_end > 0 && last->is_Mach() && while (_bb_end > 0 && last->is_Mach() &&
last->as_Mach()->ideal_Opcode() == Op_Con) { last->as_Mach()->ideal_Opcode() == Op_Con) {
last = bb->_nodes[--_bb_end]; last = bb->get_node(--_bb_end);
} }
assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, ""); assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
if( last->is_Catch() || if( last->is_Catch() ||
// Exclude unreachable path case when Halt node is in a separate block. // Exclude unreachable path case when Halt node is in a separate block.
(_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) { (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
// There must be a prior call. Skip it. // There must be a prior call. Skip it.
while( !bb->_nodes[--_bb_end]->is_MachCall() ) { while( !bb->get_node(--_bb_end)->is_MachCall() ) {
assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" ); assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
} }
} else if( last->is_MachNullCheck() ) { } else if( last->is_MachNullCheck() ) {
// Backup so the last null-checked memory instruction is // Backup so the last null-checked memory instruction is
@@ -2470,7 +2470,7 @@ void Scheduling::DoScheduling() {
Node *mem = last->in(1); Node *mem = last->in(1);
do { do {
_bb_end--; _bb_end--;
} while (mem != bb->_nodes[_bb_end]); } while (mem != bb->get_node(_bb_end));
} else { } else {
// Set _bb_end to point after last schedulable inst. // Set _bb_end to point after last schedulable inst.
_bb_end++; _bb_end++;
@@ -2499,7 +2499,7 @@ void Scheduling::DoScheduling() {
assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" ); assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
#ifdef ASSERT #ifdef ASSERT
for( uint l = _bb_start; l < _bb_end; l++ ) { for( uint l = _bb_start; l < _bb_end; l++ ) {
Node *n = bb->_nodes[l]; Node *n = bb->get_node(l);
uint m; uint m;
for( m = 0; m < _bb_end-_bb_start; m++ ) for( m = 0; m < _bb_end-_bb_start; m++ )
if( _scheduled[m] == n ) if( _scheduled[m] == n )
@@ -2510,14 +2510,14 @@ void Scheduling::DoScheduling() {
// Now copy the instructions (in reverse order) back to the block // Now copy the instructions (in reverse order) back to the block
for ( uint k = _bb_start; k < _bb_end; k++ ) for ( uint k = _bb_start; k < _bb_end; k++ )
bb->_nodes.map(k, _scheduled[_bb_end-k-1]); bb->map_node(_scheduled[_bb_end-k-1], k);
#ifndef PRODUCT #ifndef PRODUCT
if (_cfg->C->trace_opto_output()) { if (_cfg->C->trace_opto_output()) {
tty->print("# Schedule BB#%03d (final)\n", i); tty->print("# Schedule BB#%03d (final)\n", i);
uint current = 0; uint current = 0;
for (uint j = 0; j < bb->_nodes.size(); j++) { for (uint j = 0; j < bb->number_of_nodes(); j++) {
Node *n = bb->_nodes[j]; Node *n = bb->get_node(j);
if( valid_bundle_info(n) ) { if( valid_bundle_info(n) ) {
Bundle *bundle = node_bundling(n); Bundle *bundle = node_bundling(n);
if (bundle->instr_count() > 0 || bundle->flags() > 0) { if (bundle->instr_count() > 0 || bundle->flags() > 0) {
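The copy-back above (`bb->map_node(_scheduled[_bb_end-k-1], k)`) works because the scheduler fills `_scheduled` back to front; writing it out with a mirrored index restores program order. A standalone check of that index arithmetic, with toy data standing in for the HotSpot node lists:

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

int main() {
  // Slots [bb_start, bb_end) of a block are the schedulable window.
  std::vector<std::string> block = {"head", "A", "B", "C", "tail"};
  const std::size_t bb_start = 1, bb_end = 4;

  // A backward list scheduler emits instructions last-first.
  std::vector<std::string> scheduled = {"C", "A", "B"};

  // Mirrored copy-back, exactly the loop in the hunk above.
  for (std::size_t k = bb_start; k < bb_end; k++)
    block[k] = scheduled[bb_end - k - 1];

  // Program order is restored: B, A, C.
  assert(block[1] == "B" && block[2] == "A" && block[3] == "C");
  return 0;
}
```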
@@ -2579,8 +2579,8 @@ void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
// Walk over the block backwards. Check to make sure each DEF doesn't // Walk over the block backwards. Check to make sure each DEF doesn't
// kill a live value (other than the one it's supposed to). Add each // kill a live value (other than the one it's supposed to). Add each
// USE to the live set. // USE to the live set.
for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) { for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
Node *n = b->_nodes[i]; Node *n = b->get_node(i);
int n_op = n->Opcode(); int n_op = n->Opcode();
if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) { if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
// Fat-proj kills a slew of registers // Fat-proj kills a slew of registers
@@ -2711,7 +2711,7 @@ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
pinch->req() == 1 ) { // pinch not yet in block? pinch->req() == 1 ) { // pinch not yet in block?
pinch->del_req(0); // yank pointer to later-def, also set flag pinch->del_req(0); // yank pointer to later-def, also set flag
// Insert the pinch-point in the block just after the last use // Insert the pinch-point in the block just after the last use
b->_nodes.insert(b->find_node(use)+1,pinch); b->insert_node(pinch, b->find_node(use) + 1);
_bb_end++; // Increase size scheduled region in block _bb_end++; // Increase size scheduled region in block
} }
@@ -2763,10 +2763,10 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
// it being in the current block. // it being in the current block.
bool fat_proj_seen = false; bool fat_proj_seen = false;
uint last_safept = _bb_end-1; uint last_safept = _bb_end-1;
Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL; Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
Node* last_safept_node = end_node; Node* last_safept_node = end_node;
for( uint i = _bb_end-1; i >= _bb_start; i-- ) { for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
Node *n = b->_nodes[i]; Node *n = b->get_node(i);
int is_def = n->outcnt(); // def if some uses prior to adding precedence edges int is_def = n->outcnt(); // def if some uses prior to adding precedence edges
if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) { if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
// Fat-proj kills a slew of registers // Fat-proj kills a slew of registers
@@ -2815,7 +2815,7 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
// Do not allow defs of new derived values to float above GC // Do not allow defs of new derived values to float above GC
// points unless the base is definitely available at the GC point. // points unless the base is definitely available at the GC point.
Node *m = b->_nodes[i]; Node *m = b->get_node(i);
// Add precedence edge from following safepoint to use of derived pointer // Add precedence edge from following safepoint to use of derived pointer
if( last_safept_node != end_node && if( last_safept_node != end_node &&
@@ -2832,11 +2832,11 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) {
if( n->jvms() ) { // Precedence edge from derived to safept if( n->jvms() ) { // Precedence edge from derived to safept
// Check if last_safept_node was moved by pinch-point insertion in anti_do_use() // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
if( b->_nodes[last_safept] != last_safept_node ) { if( b->get_node(last_safept) != last_safept_node ) {
last_safept = b->find_node(last_safept_node); last_safept = b->find_node(last_safept_node);
} }
for( uint j=last_safept; j > i; j-- ) { for( uint j=last_safept; j > i; j-- ) {
Node *mach = b->_nodes[j]; Node *mach = b->get_node(j);
if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP ) if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
mach->add_prec( n ); mach->add_prec( n );
} }
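Stepping back, nearly every hunk in this file is one mechanical refactoring: direct pokes at the public `Node_List _nodes` of a `Block` become calls to a narrow accessor API (`number_of_nodes()`, `get_node()`, `map_node()`, `insert_node()`, `remove_node()`), so the node container turns into an implementation detail of `Block`. A minimal sketch of that encapsulation pattern, using hypothetical simplified types rather than the HotSpot definitions:

```cpp
#include <cstddef>
#include <vector>

struct Node { int idx; };

// Before: call sites indexed the public list directly, freezing both the
// container type and its bookkeeping into every caller. After: the list
// is private and all access flows through a narrow interface.
class Block {
  std::vector<Node*> _nodes;                // now an implementation detail
 public:
  std::size_t number_of_nodes() const       { return _nodes.size(); }
  Node* get_node(std::size_t i) const       { return _nodes[i]; }
  void  map_node(Node* n, std::size_t i)    { _nodes[i] = n; }  // overwrite slot
  void  insert_node(Node* n, std::size_t i) { _nodes.insert(_nodes.begin() + i, n); }
  void  remove_node(std::size_t i)          { _nodes.erase(_nodes.begin() + i); }
};

int main() {
  Block b;
  Node n0{0}, n1{1};
  b.insert_node(&n0, 0);
  b.insert_node(&n1, 1);
  b.map_node(&n0, 1);                       // replace in place, as map_node above
  return (b.number_of_nodes() == 2 && b.get_node(1) == &n0) ? 0 : 1;
}
```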

View file

@@ -518,7 +518,7 @@ class Parse : public GraphKit {
// loading from a constant field or the constant pool // loading from a constant field or the constant pool
// returns false if push failed (non-perm field constants only, not ldcs) // returns false if push failed (non-perm field constants only, not ldcs)
bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false); bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);
// implementation of object creation bytecodes // implementation of object creation bytecodes
void emit_guard_for_new(ciInstanceKlass* klass); void emit_guard_for_new(ciInstanceKlass* klass);

View file

@@ -147,7 +147,15 @@ void Parse::do_field_access(bool is_get, bool is_field) {
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
// Does this field have a constant value? If so, just push the value. // Does this field have a constant value? If so, just push the value.
if (field->is_constant()) { if (field->is_constant()) {
// final field // final or stable field
const Type* stable_type = NULL;
if (FoldStableValues && field->is_stable()) {
stable_type = Type::get_const_type(field->type());
if (field->type()->is_array_klass()) {
int stable_dimension = field->type()->as_array_klass()->dimension();
stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
}
}
if (field->is_static()) { if (field->is_static()) {
// final static field // final static field
if (C->eliminate_boxing()) { if (C->eliminate_boxing()) {
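For a @Stable array field the folded constant's type is widened level by level: the declared dimension of the field's array klass decides how many nested array types receive the stable bit (the `cast_to_stable(true, stable_dimension)` call above). A toy version of that dimension walk, under the assumption of a much-simplified type node:

```cpp
#include <cassert>

// Toy type node: either a scalar or an array of some element type.
struct Ty {
  bool is_array;
  bool stable;
  Ty*  elem;
};

// Mark the outermost 'dim' array levels stable, mirroring how the stable
// type above is built from the array klass's declared dimension.
void cast_to_stable(Ty* t, int dim) {
  if (dim <= 0 || !t->is_array) return;
  t->stable = true;
  cast_to_stable(t->elem, dim - 1);
}

int main() {
  Ty scalar = {false, false, nullptr};
  Ty inner  = {true,  false, &scalar};
  Ty outer  = {true,  false, &inner};
  cast_to_stable(&outer, 2);      // e.g. a @Stable int[][] has dimension 2
  assert(outer.stable && inner.stable && !scalar.stable);
  return 0;
}
```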
@@ -167,11 +175,10 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
} }
} }
} }
if (push_constant(field->constant_value())) if (push_constant(field->constant_value(), false, false, stable_type))
return; return;
} } else {
else { // final or stable non-static field
// final non-static field
// Treat final non-static fields of trusted classes (classes in // Treat final non-static fields of trusted classes (classes in
// java.lang.invoke and sun.invoke packages and subpackages) as // java.lang.invoke and sun.invoke packages and subpackages) as
// compile time constants. // compile time constants.
@@ -179,8 +186,12 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr(); const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
ciObject* constant_oop = oop_ptr->const_oop(); ciObject* constant_oop = oop_ptr->const_oop();
ciConstant constant = field->constant_value_of(constant_oop); ciConstant constant = field->constant_value_of(constant_oop);
if (push_constant(constant, true)) if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
return; // fall through to field load; the field is not yet initialized
} else {
if (push_constant(constant, true, false, stable_type))
return;
}
} }
} }
} }
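The `is_null_or_zero()` test above is the heart of the @Stable contract: the field's default value means "not written yet", so only a non-default value is safe to fold, and a default value must fall through to an ordinary load that can still observe a later write. A compressed version of that decision, with assumed stand-ins for ciConstant and the flags (the real path also handles trusted final fields):

```cpp
#include <cassert>

struct Constant {                  // assumed stand-in for ciConstant
  long bits;                       // 0 encodes null/zero of any type
  bool is_null_or_zero() const { return bits == 0; }
};

// Fold a stable field only when it already holds a non-default value;
// a default value keeps the runtime load so a later write is observed.
bool can_fold_stable(bool fold_flag, bool is_stable, Constant c) {
  return fold_flag && is_stable && !c.is_null_or_zero();
}

int main() {
  assert(!can_fold_stable(true, true, Constant{0}));  // uninitialized: load
  assert( can_fold_stable(true, true, Constant{7}));  // initialized: fold
  return 0;
}
```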
@@ -301,7 +312,8 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
// Note the presence of writes to final non-static fields, so that we // Note the presence of writes to final non-static fields, so that we
// can insert a memory barrier later on to keep the writes from floating // can insert a memory barrier later on to keep the writes from floating
// out of the constructor. // out of the constructor.
if (is_field && field->is_final()) { // Any method can write a @Stable field; insert memory barriers after those also.
if (is_field && (field->is_final() || field->is_stable())) {
set_wrote_final(true); set_wrote_final(true);
// Preserve allocation ptr to create precedent edge to it in membar // Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor. // generated on exit from constructor.
@@ -314,35 +326,21 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
} }
bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache) {
bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_autobox_cache, const Type* stable_type) {
const Type* con_type = Type::make_from_constant(constant, require_constant, is_autobox_cache);
switch (constant.basic_type()) { switch (constant.basic_type()) {
case T_BOOLEAN: push( intcon(constant.as_boolean()) ); break;
case T_INT: push( intcon(constant.as_int()) ); break;
case T_CHAR: push( intcon(constant.as_char()) ); break;
case T_BYTE: push( intcon(constant.as_byte()) ); break;
case T_SHORT: push( intcon(constant.as_short()) ); break;
case T_FLOAT: push( makecon(TypeF::make(constant.as_float())) ); break;
case T_DOUBLE: push_pair( makecon(TypeD::make(constant.as_double())) ); break;
case T_LONG: push_pair( longcon(constant.as_long()) ); break;
case T_ARRAY: case T_ARRAY:
case T_OBJECT: { case T_OBJECT:
// cases: // cases:
// can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0) // can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
// should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2) // should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
// An oop is not scavengable if it is in the perm gen. // An oop is not scavengable if it is in the perm gen.
ciObject* oop_constant = constant.as_object(); if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
if (oop_constant->is_null_object()) { con_type = con_type->join(stable_type);
push( zerocon(T_OBJECT) ); break;
break;
} else if (require_constant || oop_constant->should_be_constant()) { case T_ILLEGAL:
push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache)) );
break;
} else {
// we cannot inline the oop, but we can use it later to narrow a type
return false;
}
}
case T_ILLEGAL: {
// Invalid ciConstant returned due to OutOfMemoryError in the CI // Invalid ciConstant returned due to OutOfMemoryError in the CI
assert(C->env()->failing(), "otherwise should not see this"); assert(C->env()->failing(), "otherwise should not see this");
// These always occur because of object types; we are going to // These always occur because of object types; we are going to
@@ -350,17 +348,16 @@ bool Parse::push_constant(ciConstant constant, bool require_constant, bool is_au
push( zerocon(T_OBJECT) ); push( zerocon(T_OBJECT) );
return false; return false;
} }
default:
ShouldNotReachHere();
return false;
}
// success if (con_type == NULL)
// we cannot inline the oop, but we can use it later to narrow a type
return false;
push_node(constant.basic_type(), makecon(con_type));
return true; return true;
} }
//============================================================================= //=============================================================================
void Parse::do_anewarray() { void Parse::do_anewarray() {
bool will_link; bool will_link;
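The net effect of the push_constant rewrite: the per-type switch moves into `Type::make_from_constant` (shown in the type.cpp hunk below), and what remains here is "make a type, optionally narrow it with the stable type via `join`, push one node". A toy model of that narrowing step, treating types as bitmasks so that `join` is simply intersection (an assumption for illustration, not C2's real lattice operation):

```cpp
#include <cstdio>

// Toy model: types as bitmasks, 'join' as intersection -- an assumption
// for illustration only, not C2's real lattice.
typedef unsigned Type;
Type join(Type a, Type b) { return a & b; }

// Assumed shape of the rewritten flow: one conversion, one optional
// narrowing, one push; NULL (here 0) means "could not inline".
bool push_constant(Type con_type, const Type* stable_type) {
  if (con_type == 0) return false;       // cannot inline the constant
  if (stable_type != nullptr) con_type = join(con_type, *stable_type);
  std::printf("push one node of type %#x\n", con_type);
  return true;
}

int main() {
  Type stable = 0x0f;
  return push_constant(0x3f, &stable) ? 0 : 1;
}
```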

View file

@@ -1648,10 +1648,10 @@ void PhasePeephole::do_transform() {
bool block_not_printed = true; bool block_not_printed = true;
// and each instruction within a block // and each instruction within a block
uint end_index = block->_nodes.size(); uint end_index = block->number_of_nodes();
// block->end_idx() not valid after PhaseRegAlloc // block->end_idx() not valid after PhaseRegAlloc
for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) { for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
Node *n = block->_nodes.at(instruction_index); Node *n = block->get_node(instruction_index);
if( n->is_Mach() ) { if( n->is_Mach() ) {
MachNode *m = n->as_Mach(); MachNode *m = n->as_Mach();
int deleted_count = 0; int deleted_count = 0;
@@ -1673,7 +1673,7 @@ void PhasePeephole::do_transform() {
} }
// Print instructions being deleted // Print instructions being deleted
for( int i = (deleted_count - 1); i >= 0; --i ) { for( int i = (deleted_count - 1); i >= 0; --i ) {
block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr(); block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
} }
tty->print_cr("replaced with"); tty->print_cr("replaced with");
// Print new instruction // Print new instruction
@@ -1687,11 +1687,11 @@ void PhasePeephole::do_transform() {
// the node index to live range mappings.) // the node index to live range mappings.)
uint safe_instruction_index = (instruction_index - deleted_count); uint safe_instruction_index = (instruction_index - deleted_count);
for( ; (instruction_index > safe_instruction_index); --instruction_index ) { for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
block->_nodes.remove( instruction_index ); block->remove_node( instruction_index );
} }
// install new node after safe_instruction_index // install new node after safe_instruction_index
block->_nodes.insert( safe_instruction_index + 1, m2 ); block->insert_node(m2, safe_instruction_index + 1);
end_index = block->_nodes.size() - 1; // Recompute new block size end_index = block->number_of_nodes() - 1; // Recompute new block size
NOT_PRODUCT( inc_peepholes(); ) NOT_PRODUCT( inc_peepholes(); )
} }
} }

View file

@@ -423,8 +423,8 @@ void PhaseChaitin::post_allocate_copy_removal() {
// Count of Phis in block // Count of Phis in block
uint phi_dex; uint phi_dex;
for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) { for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
Node* phi = block->_nodes[phi_dex]; Node* phi = block->get_node(phi_dex);
if (!phi->is_Phi()) { if (!phi->is_Phi()) {
break; break;
} }
@@ -439,7 +439,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
Block* pb = _cfg.get_block_for_node(block->pred(j)); Block* pb = _cfg.get_block_for_node(block->pred(j));
// Remove copies along phi edges // Remove copies along phi edges
for (uint k = 1; k < phi_dex; k++) { for (uint k = 1; k < phi_dex; k++) {
elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false); elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
} }
if (blk2value[pb->_pre_order]) { // Have a mapping on this edge? if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
// See if this predecessor's mappings have been used by everybody // See if this predecessor's mappings have been used by everybody
@@ -510,7 +510,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
// For all Phi's // For all Phi's
for (j = 1; j < phi_dex; j++) { for (j = 1; j < phi_dex; j++) {
uint k; uint k;
Node *phi = block->_nodes[j]; Node *phi = block->get_node(j);
uint pidx = _lrg_map.live_range_id(phi); uint pidx = _lrg_map.live_range_id(phi);
OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg(); OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
@@ -522,7 +522,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
} }
if (u != NodeSentinel) { // Junk Phi. Remove if (u != NodeSentinel) { // Junk Phi. Remove
block->_nodes.remove(j--); block->remove_node(j--);
phi_dex--; phi_dex--;
_cfg.unmap_node_from_block(phi); _cfg.unmap_node_from_block(phi);
phi->replace_by(u); phi->replace_by(u);
@@ -552,8 +552,8 @@ void PhaseChaitin::post_allocate_copy_removal() {
} }
// For all remaining instructions // For all remaining instructions
for (j = phi_dex; j < block->_nodes.size(); j++) { for (j = phi_dex; j < block->number_of_nodes(); j++) {
Node* n = block->_nodes[j]; Node* n = block->get_node(j);
if(n->outcnt() == 0 && // Dead? if(n->outcnt() == 0 && // Dead?
n != C->top() && // (ignore TOP, it has no du info) n != C->top() && // (ignore TOP, it has no du info)

View file

@@ -112,17 +112,17 @@ Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) { void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
// Skip intervening ProjNodes. Do not insert between a ProjNode and // Skip intervening ProjNodes. Do not insert between a ProjNode and
// its definer. // its definer.
while( i < b->_nodes.size() && while( i < b->number_of_nodes() &&
(b->_nodes[i]->is_Proj() || (b->get_node(i)->is_Proj() ||
b->_nodes[i]->is_Phi() ) ) b->get_node(i)->is_Phi() ) )
i++; i++;
// Do not insert between a call and its Catch            // Do not insert between a call and its Catch
if( b->_nodes[i]->is_Catch() ) { if( b->get_node(i)->is_Catch() ) {
// Put the instruction at the top of the fall-thru block. // Put the instruction at the top of the fall-thru block.
// Find the fall-thru projection // Find the fall-thru projection
while( 1 ) { while( 1 ) {
const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj(); const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
if( cp->_con == CatchProjNode::fall_through_index ) if( cp->_con == CatchProjNode::fall_through_index )
break; break;
} }
@@ -131,7 +131,7 @@ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
i = 1; // Right at start of block i = 1; // Right at start of block
} }
b->_nodes.insert(i,spill); // Insert node in block b->insert_node(spill, i); // Insert node in block
_cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
// Adjust the point where we go hi-pressure // Adjust the point where we go hi-pressure
if( i <= b->_ihrp_index ) b->_ihrp_index++; if( i <= b->_ihrp_index ) b->_ihrp_index++;
@@ -160,9 +160,9 @@ uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **
// (The implicit_null_check function ensures the use is also dominated // (The implicit_null_check function ensures the use is also dominated
// by the branch-not-taken block.) // by the branch-not-taken block.)
Node *be = b->end(); Node *be = b->end();
if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) { if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
// Spill goes in the branch-not-taken block // Spill goes in the branch-not-taken block
b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue]; b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
loc = 0; // Just past the Region loc = 0; // Just past the Region
} }
assert( loc >= 0, "must insert past block head" ); assert( loc >= 0, "must insert past block head" );
@@ -450,7 +450,7 @@ bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
// Scan block for 1st use. // Scan block for 1st use.
for( uint i = 1; i <= b->end_idx(); i++ ) { for( uint i = 1; i <= b->end_idx(); i++ ) {
Node *n = b->_nodes[i]; Node *n = b->get_node(i);
// Ignore PHI use, these can be up or down // Ignore PHI use, these can be up or down
if (n->is_Phi()) { if (n->is_Phi()) {
continue; continue;
@@ -647,7 +647,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// check block for appropriate phinode & update edges // check block for appropriate phinode & update edges
for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
n1 = b->_nodes[insidx]; n1 = b->get_node(insidx);
// bail if this is not a phi // bail if this is not a phi
phi = n1->is_Phi() ? n1->as_Phi() : NULL; phi = n1->is_Phi() ? n1->as_Phi() : NULL;
if( phi == NULL ) { if( phi == NULL ) {
@@ -747,7 +747,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
//----------Walk Instructions in the Block and Split---------- //----------Walk Instructions in the Block and Split----------
// For all non-phi instructions in the block // For all non-phi instructions in the block
for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
Node *n = b->_nodes[insidx]; Node *n = b->get_node(insidx);
// Find the defining Node's live range index // Find the defining Node's live range index
uint defidx = _lrg_map.find_id(n); uint defidx = _lrg_map.find_id(n);
uint cnt = n->req(); uint cnt = n->req();
@@ -776,7 +776,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg"); assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
n->replace_by(u); // Then replace with unique input n->replace_by(u); // Then replace with unique input
n->disconnect_inputs(NULL, C); n->disconnect_inputs(NULL, C);
b->_nodes.remove(insidx); b->remove_node(insidx);
insidx--; insidx--;
b->_ihrp_index--; b->_ihrp_index--;
b->_fhrp_index--; b->_fhrp_index--;
@@ -789,12 +789,12 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
(b->_reg_pressure < (uint)INTPRESSURE) || (b->_reg_pressure < (uint)INTPRESSURE) ||
b->_ihrp_index > 4000000 || b->_ihrp_index > 4000000 ||
b->_ihrp_index >= b->end_idx() || b->_ihrp_index >= b->end_idx() ||
!b->_nodes[b->_ihrp_index]->is_Proj(), "" ); !b->get_node(b->_ihrp_index)->is_Proj(), "" );
assert( insidx > b->_fhrp_index || assert( insidx > b->_fhrp_index ||
(b->_freg_pressure < (uint)FLOATPRESSURE) || (b->_freg_pressure < (uint)FLOATPRESSURE) ||
b->_fhrp_index > 4000000 || b->_fhrp_index > 4000000 ||
b->_fhrp_index >= b->end_idx() || b->_fhrp_index >= b->end_idx() ||
!b->_nodes[b->_fhrp_index]->is_Proj(), "" ); !b->get_node(b->_fhrp_index)->is_Proj(), "" );
// ********** Handle Crossing HRP Boundary **********            // ********** Handle Crossing HRP Boundary **********
if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) { if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
@@ -819,7 +819,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Insert point is just past last use or def in the block // Insert point is just past last use or def in the block
int insert_point = insidx-1; int insert_point = insidx-1;
while( insert_point > 0 ) { while( insert_point > 0 ) {
Node *n = b->_nodes[insert_point]; Node *n = b->get_node(insert_point);
// Hit top of block? Quit going backwards // Hit top of block? Quit going backwards
if (n->is_Phi()) { if (n->is_Phi()) {
break; break;
@@ -865,7 +865,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
} }
} // end if LRG is UP } // end if LRG is UP
} // end for all spilling live ranges } // end for all spilling live ranges
assert( b->_nodes[insidx] == n, "got insidx set incorrectly" ); assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
} // end if crossing HRP Boundary            } // end if crossing HRP Boundary
// If the LRG index is oob, then this is a new spillcopy, skip it. // If the LRG index is oob, then this is a new spillcopy, skip it.
@@ -878,7 +878,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
n->replace_by( n->in(copyidx) ); n->replace_by( n->in(copyidx) );
n->set_req( copyidx, NULL ); n->set_req( copyidx, NULL );
b->_nodes.remove(insidx--); b->remove_node(insidx--);
b->_ihrp_index--; // Adjust the point where we go hi-pressure b->_ihrp_index--; // Adjust the point where we go hi-pressure
b->_fhrp_index--; b->_fhrp_index--;
continue; continue;
@@ -932,10 +932,10 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// Rematerializable? Then clone def at use site instead // Rematerializable? Then clone def at use site instead
// of store/load // of store/load
if( def->rematerialize() ) { if( def->rematerialize() ) {
int old_size = b->_nodes.size(); int old_size = b->number_of_nodes();
def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true ); def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
if( !def ) return 0; // Bail out if( !def ) return 0; // Bail out
insidx += b->_nodes.size()-old_size; insidx += b->number_of_nodes()-old_size;
} }
MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL; MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
@@ -1332,8 +1332,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// so look at the node before it. // so look at the node before it.
int insert = pred->end_idx(); int insert = pred->end_idx();
while (insert >= 1 && while (insert >= 1 &&
pred->_nodes[insert - 1]->is_SpillCopy() && pred->get_node(insert - 1)->is_SpillCopy() &&
_lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
insert--; insert--;
} }
def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false); def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
@@ -1402,7 +1402,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) { for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
b = _cfg.get_block(bidx); b = _cfg.get_block(bidx);
for (insidx = 0; insidx <= b->end_idx(); insidx++) { for (insidx = 0; insidx <= b->end_idx(); insidx++) {
Node *n = b->_nodes[insidx]; Node *n = b->get_node(insidx);
uint defidx = _lrg_map.find(n); uint defidx = _lrg_map.find(n);
assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
assert(defidx < maxlrg,"Bad live range index in Split"); assert(defidx < maxlrg,"Bad live range index in Split");

View file

@@ -189,6 +189,38 @@ const Type* Type::get_typeflow_type(ciType* type) {
} }
//-----------------------make_from_constant------------------------------------
const Type* Type::make_from_constant(ciConstant constant,
bool require_constant, bool is_autobox_cache) {
switch (constant.basic_type()) {
case T_BOOLEAN: return TypeInt::make(constant.as_boolean());
case T_CHAR: return TypeInt::make(constant.as_char());
case T_BYTE: return TypeInt::make(constant.as_byte());
case T_SHORT: return TypeInt::make(constant.as_short());
case T_INT: return TypeInt::make(constant.as_int());
case T_LONG: return TypeLong::make(constant.as_long());
case T_FLOAT: return TypeF::make(constant.as_float());
case T_DOUBLE: return TypeD::make(constant.as_double());
case T_ARRAY:
case T_OBJECT:
{
// cases:
// can_be_constant = (oop not scavengable || ScavengeRootsInCode != 0)
// should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
// An oop is not scavengable if it is in the perm gen.
ciObject* oop_constant = constant.as_object();
if (oop_constant->is_null_object()) {
return Type::get_zero_type(T_OBJECT);
} else if (require_constant || oop_constant->should_be_constant()) {
return TypeOopPtr::make_from_constant(oop_constant, require_constant, is_autobox_cache);
}
}
}
// Fall through to failure
return NULL;
}
//------------------------------make------------------------------------------- //------------------------------make-------------------------------------------
// Create a simple Type, with default empty symbol sets. Then hashcons it // Create a simple Type, with default empty symbol sets. Then hashcons it
// and look for an existing copy in the type dictionary. // and look for an existing copy in the type dictionary.
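Note the asymmetry in make_from_constant: primitive constants always yield a type, but T_ARRAY/T_OBJECT can fall through to NULL when the oop cannot be embedded in generated code (a scavengable oop without a sufficient ScavengeRootsInCode setting). push_constant above translates that NULL into "cannot inline the oop, but it can still narrow a type later".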
@@ -1824,12 +1856,12 @@ inline const TypeInt* normalize_array_size(const TypeInt* size) {
} }
//------------------------------make------------------------------------------- //------------------------------make-------------------------------------------
const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) { const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) {
if (UseCompressedOops && elem->isa_oopptr()) { if (UseCompressedOops && elem->isa_oopptr()) {
elem = elem->make_narrowoop(); elem = elem->make_narrowoop();
} }
size = normalize_array_size(size); size = normalize_array_size(size);
return (TypeAry*)(new TypeAry(elem,size))->hashcons(); return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons();
} }
//------------------------------meet------------------------------------------- //------------------------------meet-------------------------------------------
@@ -1850,7 +1882,8 @@ const Type *TypeAry::xmeet( const Type *t ) const {
case Array: { // Meeting 2 arrays? case Array: { // Meeting 2 arrays?
const TypeAry *a = t->is_ary(); const TypeAry *a = t->is_ary();
return TypeAry::make(_elem->meet(a->_elem), return TypeAry::make(_elem->meet(a->_elem),
_size->xmeet(a->_size)->is_int()); _size->xmeet(a->_size)->is_int(),
_stable & a->_stable);
} }
case Top: case Top:
break; break;
@@ -1863,7 +1896,7 @@ const Type *TypeAry::xmeet( const Type *t ) const {
const Type *TypeAry::xdual() const { const Type *TypeAry::xdual() const {
const TypeInt* size_dual = _size->dual()->is_int(); const TypeInt* size_dual = _size->dual()->is_int();
size_dual = normalize_array_size(size_dual); size_dual = normalize_array_size(size_dual);
return new TypeAry( _elem->dual(), size_dual); return new TypeAry(_elem->dual(), size_dual, !_stable);
} }
//------------------------------eq--------------------------------------------- //------------------------------eq---------------------------------------------
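The stable bit meets by logical AND (`_stable & a->_stable`) and dualizes by negation (`!_stable`), so stability only survives a meet when both inputs have it, and `xdual` remains an involution as the C2 lattice requires. Those small algebraic facts can be checked in isolation:

```cpp
#include <cassert>

// The stable flag as a two-point lattice: meet is AND, dual is NOT.
bool meet(bool a, bool b) { return a && b; }
bool dual(bool a)         { return !a; }

int main() {
  for (int a = 0; a <= 1; a++) {
    assert(dual(dual(a)) == static_cast<bool>(a));    // involution
    for (int b = 0; b <= 1; b++) {
      assert(meet(a, b) == meet(b, a));               // commutative
      // Duality: join falls out as the dual of meet, i.e. OR.
      assert(dual(meet(dual(a), dual(b))) == (a || b));
    }
  }
  return 0;
}
```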
@@ -1871,13 +1904,14 @@ const Type *TypeAry::xdual() const {
bool TypeAry::eq( const Type *t ) const { bool TypeAry::eq( const Type *t ) const {
const TypeAry *a = (const TypeAry*)t; const TypeAry *a = (const TypeAry*)t;
return _elem == a->_elem && return _elem == a->_elem &&
_stable == a->_stable &&
_size == a->_size; _size == a->_size;
} }
//------------------------------hash------------------------------------------- //------------------------------hash-------------------------------------------
// Type-specific hashing function. // Type-specific hashing function.
int TypeAry::hash(void) const { int TypeAry::hash(void) const {
return (intptr_t)_elem + (intptr_t)_size; return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
} }
//----------------------interface_vs_oop--------------------------------------- //----------------------interface_vs_oop---------------------------------------
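Extending `eq()` and `hash()` is not optional bookkeeping: TypeAry instances are hash-consed (`hashcons()` interns them through exactly these two functions), so a new attribute left out of either would make a stable and a non-stable array type collapse into one dictionary entry. A self-contained illustration of the interning invariant, using a standard container as a stand-in for C2's type dictionary:

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <unordered_set>

struct Ary {
  int elem; int size; bool stable;
  bool operator==(const Ary& o) const {   // eq() must cover every field
    return elem == o.elem && size == o.size && stable == o.stable;
  }
};
struct AryHash {                          // hash() must cover them too
  std::size_t operator()(const Ary& a) const {
    // The '+ 43' mirrors the real hash change above.
    return std::hash<int>()(a.elem) * 31 + a.size + (a.stable ? 43 : 0);
  }
};

int main() {
  std::unordered_set<Ary, AryHash> dict;  // stand-in for the type dictionary
  dict.insert({1, 10, false});
  dict.insert({1, 10, true});             // distinct only if eq/hash see 'stable'
  assert(dict.size() == 2);
  return 0;
}
```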
@@ -1894,6 +1928,7 @@ bool TypeAry::interface_vs_oop(const Type *t) const {
//------------------------------dump2------------------------------------------ //------------------------------dump2------------------------------------------
#ifndef PRODUCT #ifndef PRODUCT
void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const { void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
if (_stable) st->print("stable:");
_elem->dump2(d, depth, st); _elem->dump2(d, depth, st);
st->print("["); st->print("[");
_size->dump2(d, depth, st); _size->dump2(d, depth, st);
@@ -3457,11 +3492,39 @@ const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const {
assert(new_size != NULL, ""); assert(new_size != NULL, "");
new_size = narrow_size_type(new_size); new_size = narrow_size_type(new_size);
if (new_size == size()) return this; if (new_size == size()) return this;
const TypeAry* new_ary = TypeAry::make(elem(), new_size); const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable());
return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id); return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
} }
//------------------------------cast_to_stable---------------------------------
const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const {
if (stable_dimension <= 0 || (stable_dimension == 1 && stable == this->is_stable()))
return this;
const Type* elem = this->elem();
const TypePtr* elem_ptr = elem->make_ptr();
if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->isa_aryptr()) {
// If this is widened from a narrow oop, TypeAry::make will re-narrow it.
elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1);
}
const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
}
//-----------------------------stable_dimension--------------------------------
int TypeAryPtr::stable_dimension() const {
if (!is_stable()) return 0;
int dim = 1;
const TypePtr* elem_ptr = elem()->make_ptr();
if (elem_ptr != NULL && elem_ptr->isa_aryptr())
dim += elem_ptr->is_aryptr()->stable_dimension();
return dim;
}
//------------------------------eq--------------------------------------------- //------------------------------eq---------------------------------------------
// Structural equality check for Type representations // Structural equality check for Type representations
bool TypeAryPtr::eq( const Type *t ) const { bool TypeAryPtr::eq( const Type *t ) const {
@@ -3570,7 +3633,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
// Something like byte[int+] meets char[int+]. // Something like byte[int+] meets char[int+].
// This must fall to bottom, not (int[-128..65535])[int+]. // This must fall to bottom, not (int[-128..65535])[int+].
instance_id = InstanceBot; instance_id = InstanceBot;
tary = TypeAry::make(Type::BOTTOM, tary->_size); tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
} }
} else // Non integral arrays. } else // Non integral arrays.
// Must fall to bottom if exact klasses in upper lattice // Must fall to bottom if exact klasses in upper lattice
@@ -3584,7 +3647,7 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
(tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) || (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
// 'this' is exact and super or unrelated: // 'this' is exact and super or unrelated:
(this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) { (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
tary = TypeAry::make(Type::BOTTOM, tary->_size); tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot ); return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
} }

View file

@@ -372,6 +372,10 @@ public:
// Mapping from CI type system to compiler type: // Mapping from CI type system to compiler type:
static const Type* get_typeflow_type(ciType* type); static const Type* get_typeflow_type(ciType* type);
static const Type* make_from_constant(ciConstant constant,
bool require_constant = false,
bool is_autobox_cache = false);
private: private:
// support arrays // support arrays
static const BasicType _basic_type[]; static const BasicType _basic_type[];
@@ -588,8 +592,8 @@ public:
//------------------------------TypeAry---------------------------------------- //------------------------------TypeAry----------------------------------------
// Class of Array Types // Class of Array Types
class TypeAry : public Type { class TypeAry : public Type {
TypeAry( const Type *elem, const TypeInt *size) : Type(Array), TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array),
_elem(elem), _size(size) {} _elem(elem), _size(size), _stable(stable) {}
public: public:
virtual bool eq( const Type *t ) const; virtual bool eq( const Type *t ) const;
virtual int hash() const; // Type specific hashing virtual int hash() const; // Type specific hashing
@@ -599,10 +603,11 @@ public:
private: private:
const Type *_elem; // Element type of array const Type *_elem; // Element type of array
const TypeInt *_size; // Elements in array const TypeInt *_size; // Elements in array
const bool _stable; // Are elements @Stable?
friend class TypeAryPtr; friend class TypeAryPtr;
public: public:
static const TypeAry *make( const Type *elem, const TypeInt *size); static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false);
virtual const Type *xmeet( const Type *t ) const; virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now. virtual const Type *xdual() const; // Compute dual right now.
@@ -988,6 +993,7 @@ public:
const TypeAry* ary() const { return _ary; } const TypeAry* ary() const { return _ary; }
const Type* elem() const { return _ary->_elem; } const Type* elem() const { return _ary->_elem; }
const TypeInt* size() const { return _ary->_size; } const TypeInt* size() const { return _ary->_size; }
bool is_stable() const { return _ary->_stable; }
bool is_autobox_cache() const { return _is_autobox_cache; } bool is_autobox_cache() const { return _is_autobox_cache; }
@@ -1011,6 +1017,9 @@ public:
virtual const Type *xmeet( const Type *t ) const; virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now. virtual const Type *xdual() const; // Compute dual right now.
const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
int stable_dimension() const;
// Convenience common pre-built types. // Convenience common pre-built types.
static const TypeAryPtr *RANGE; static const TypeAryPtr *RANGE;
static const TypeAryPtr *OOPS; static const TypeAryPtr *OOPS;

View file

@@ -1605,17 +1605,6 @@ julong Arguments::limit_by_allocatable_memory(julong limit) {
return result; return result;
} }
void Arguments::set_heap_base_min_address() {
if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
// By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
// G1 currently needs a lot of C-heap, so on Solaris we have to give G1
// some extra space for the C-heap compared to other collectors.
// Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that
// code that checks for default values work correctly.
FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G);
}
}
void Arguments::set_heap_size() { void Arguments::set_heap_size() {
if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) { if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
// Deprecated flag // Deprecated flag
@@ -3537,8 +3526,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
} }
} }
set_heap_base_min_address();
// Set heap size based on available physical memory // Set heap size based on available physical memory
set_heap_size(); set_heap_size();

View file

@@ -334,8 +334,6 @@ class Arguments : AllStatic {
// limits the given memory size by the maximum amount of memory this process is // limits the given memory size by the maximum amount of memory this process is
// currently allowed to allocate or reserve. // currently allowed to allocate or reserve.
static julong limit_by_allocatable_memory(julong size); static julong limit_by_allocatable_memory(julong size);
// Setup HeapBaseMinAddress
static void set_heap_base_min_address();
// Setup heap size // Setup heap size
static void set_heap_size(); static void set_heap_size();
// Based on automatic selection criteria, should the // Based on automatic selection criteria, should the

View file

@@ -205,6 +205,7 @@ void Flag::print_as_flag(outputStream* st) {
#define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT }, #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
#define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT }, #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
#ifdef PRODUCT #ifdef PRODUCT
#define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */ #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */ #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc) /* flag is constant */
@@ -260,7 +261,7 @@ static Flag flagTable[] = {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT) G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
#ifdef COMPILER1 #ifdef COMPILER1
C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT) C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
#endif #endif
#ifdef COMPILER2 #ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT) C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)

View file

@@ -3649,6 +3649,9 @@ class CommandLineFlags {
experimental(bool, TrustFinalNonStaticFields, false, \ experimental(bool, TrustFinalNonStaticFields, false, \
"trust final non-static declarations for constant folding") \ "trust final non-static declarations for constant folding") \
\ \
experimental(bool, FoldStableValues, false, \
"Private flag to control optimizations for stable variables") \
\
develop(bool, TraceInvokeDynamic, false, \ develop(bool, TraceInvokeDynamic, false, \
"trace internal invoke dynamic operations") \ "trace internal invoke dynamic operations") \
\ \
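Being an `experimental(...)` flag, FoldStableValues is refused unless experimental options are unlocked first, so exercising the new folding takes `-XX:+UnlockExperimentalVMOptions -XX:+FoldStableValues` on the java command line.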

View file

@@ -57,6 +57,7 @@
#define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name), #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name), #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc) FLAG_MEMBER(name),
#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc) FLAG_MEMBER(name),
#ifdef PRODUCT #ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */ #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */ #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc) /* flag is constant */
@ -99,7 +100,7 @@ typedef enum {
G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER) G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
#endif // INCLUDE_ALL_GCS #endif // INCLUDE_ALL_GCS
#ifdef COMPILER1 #ifdef COMPILER1
C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER) C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
#endif #endif
#ifdef COMPILER2 #ifdef COMPILER2
C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER) C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
@ -131,6 +132,7 @@ typedef enum {
#define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type), #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) FLAG_MEMBER_WITH_TYPE(name,type),
#ifdef PRODUCT #ifdef PRODUCT
#define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */ #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc) /* flag is constant */
#define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */ #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc) /* flag is constant */
@ -204,6 +206,7 @@ typedef enum {
C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
C1_PRODUCT_FLAG_MEMBER_WITH_TYPE, C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE) C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
#endif #endif
#ifdef COMPILER2 #ifdef COMPILER2

View file

@@ -471,16 +471,6 @@ class RuntimeHistogramElement : public HistogramElement {
   VM_ENTRY_BASE(result_type, header, thread)                          \
   debug_only(VMEntryWrapper __vew;)

-// Another special case for nmethod_entry_point so the nmethod that the
-// interpreter is about to branch to doesn't get flushed before as we
-// branch to it's interpreter_entry_point. Skip stress testing here too.
-// Also we don't allow async exceptions because it is just too painful.
-#define IRT_ENTRY_FOR_NMETHOD(result_type, header)                    \
-  result_type header {                                                \
-    nmethodLocker _nmlock(nm);                                        \
-    ThreadInVMfromJavaNoAsyncException __tiv(thread);                  \
-    VM_ENTRY_BASE(result_type, header, thread)
-
 #define IRT_END }

View file

@@ -1051,7 +1051,8 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
   // Find receiver for non-static call
   if (bc != Bytecodes::_invokestatic &&
-      bc != Bytecodes::_invokedynamic) {
+      bc != Bytecodes::_invokedynamic &&
+      bc != Bytecodes::_invokehandle) {
     // This register map must be update since we need to find the receiver for
     // compiled frames. The receiver might be in a register.
     RegisterMap reg_map2(thread);
@@ -1078,7 +1079,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
 #ifdef ASSERT
   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
-  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
+  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
     assert(receiver.not_null(), "should have thrown exception");
     KlassHandle receiver_klass(THREAD, receiver->klass());
     Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -1240,9 +1241,9 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
 #endif
   if (is_virtual) {
-    assert(receiver.not_null(), "sanity check");
+    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
-    KlassHandle h_klass(THREAD, receiver->klass());
+    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                           is_optimized, static_bound, virtual_call_info,
                                           CHECK_(methodHandle()));

View file

@@ -3636,6 +3636,16 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   CompileBroker::compilation_init();
 #endif

+  if (EnableInvokeDynamic) {
+    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+    // It is done after compilers are initialized, because otherwise compilations of
+    // signature polymorphic MH intrinsics can be missed
+    // (see SystemDictionary::find_method_handle_intrinsic).
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
+    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+  }
+
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
 #endif // INCLUDE_MANAGEMENT
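A rough Java-level analogue of what the initialize_class() calls above force during startup, for illustration only (the VM does this natively, not via reflection). The ConcurrentClassLoadingTest added later in this changeset races concurrent first use of these same classes.

public class EagerInitSketch {
    public static void main(String[] args) throws ClassNotFoundException {
        // Force loading *and* initialization of a class by name, roughly
        // what initialize_class() achieves for the JSR292 core classes.
        Class<?> mh = Class.forName("java.lang.invoke.MethodHandle",
                                    /* initialize = */ true,
                                    ClassLoader.getSystemClassLoader());
        System.out.println(mh.getName() + " initialized");
    }
}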

View file

@@ -78,11 +78,13 @@ enum {
   JVM_ACC_FIELD_ACCESS_WATCHED        = 0x00002000, // field access is watched by JVMTI
   JVM_ACC_FIELD_MODIFICATION_WATCHED  = 0x00008000, // field modification is watched by JVMTI
   JVM_ACC_FIELD_INTERNAL              = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT
+  JVM_ACC_FIELD_STABLE                = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED
   JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature

   JVM_ACC_FIELD_INTERNAL_FLAGS        = JVM_ACC_FIELD_ACCESS_WATCHED |
                                         JVM_ACC_FIELD_MODIFICATION_WATCHED |
                                         JVM_ACC_FIELD_INTERNAL |
+                                        JVM_ACC_FIELD_STABLE |
                                         JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE,

   // flags accepted by set_field_flags()
@@ -148,6 +150,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
                                         { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
   bool on_stack() const                 { return (_flags & JVM_ACC_ON_STACK) != 0; }
   bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
+  bool is_stable() const                { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
   bool field_has_generic_signature() const
                                         { return (_flags & JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE) != 0; }
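One detail worth noting: JVM_ACC_FIELD_STABLE reuses the bit value of JVM_ACC_SYNCHRONIZED, which has no meaning for fields, and it is also folded into JVM_ACC_FIELD_INTERNAL_FLAGS, presumably so the bit is never exposed as a real field modifier. The small Java mirror below restates the bit-test pattern; FieldFlags is a hypothetical name, not a HotSpot type, and only the constant values are taken from the hunk.

// Hypothetical Java mirror of the AccessFlags accessors above; the constant
// values are copied verbatim from the hunk.
final class FieldFlags {
    static final int JVM_ACC_FIELD_INTERNAL = 0x00000400; // same bit as JVM_ACC_ABSTRACT
    static final int JVM_ACC_FIELD_STABLE   = 0x00000020; // same bit as JVM_ACC_SYNCHRONIZED

    private final int flags;

    FieldFlags(int flags) { this.flags = flags; }

    boolean isInternal() { return (flags & JVM_ACC_FIELD_INTERNAL) != 0; }
    boolean isStable()   { return (flags & JVM_ACC_FIELD_STABLE)   != 0; }
}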

View file

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 8023472
* @summary C2 optimization breaks with G1
*
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
*
* @author pbiswal@palantir.com
*/
public class G1CrashTest {
    static Object[] set = new Object[11];

    public static void main(String[] args) throws InterruptedException {
        for (int j = 0; j < Integer.getInteger("count"); j++) {
            Object key = new Object();
            insertKey(key);
            if (j > set.length / 2) {
                Object[] oldKeys = set;
                set = new Object[2 * set.length - 1];
                for (Object o : oldKeys) {
                    if (o != null)
                        insertKey(o);
                }
            }
        }
    }

    static void insertKey(Object key) {
        int hash = key.hashCode() & 0x7fffffff;
        int index = hash % set.length;
        Object cur = set[index];
        if (cur == null)
            set[index] = key;
        else
            insertKeyRehash(key, index, hash, cur);
    }

    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
        int loopIndex = index;
        int firstRemoved = -1;
        do {
            if (cur == "dead")
                firstRemoved = 1;
            index--;
            if (index < 0)
                index += set.length;
            cur = set[index];
            if (cur == null) {
                if (firstRemoved != -1)
                    set[firstRemoved] = "dead";
                else
                    set[index] = key;
                return;
            }
        } while (index != loopIndex);
        if (firstRemoved != -1)
            set[firstRemoved] = null;
    }
}
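A note on running G1CrashTest outside jtreg: the -Dcount property is required, because Integer.getInteger("count") returns null when the property is unset and the loop bound then throws a NullPointerException on unboxing. A standalone run mirrors the @run line above, e.g. java -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest.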

View file

@@ -0,0 +1,194 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8022595
* @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
*
* @run main/othervm ConcurrentClassLoadingTest
*/
import java.util.*;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
public class ConcurrentClassLoadingTest {
    int numThreads = 0;
    long seed = 0;
    CyclicBarrier l;
    Random rand;

    public static void main(String[] args) throws Throwable {
        ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
        test.parseArgs(args);
        test.run();
    }

    void parseArgs(String[] args) {
        int i = 0;
        while (i < args.length) {
            String flag = args[i];
            switch (flag) {
                case "-seed":
                    seed = Long.parseLong(args[++i]);
                    break;
                case "-numThreads":
                    numThreads = Integer.parseInt(args[++i]);
                    break;
                default:
                    throw new Error("Unknown flag: " + flag);
            }
            ++i;
        }
    }

    void init() {
        if (numThreads == 0) {
            numThreads = Runtime.getRuntime().availableProcessors();
        }
        if (seed == 0) {
            seed = (new Random()).nextLong();
        }
        rand = new Random(seed);
        l = new CyclicBarrier(numThreads + 1);
        System.out.printf("Threads: %d\n", numThreads);
        System.out.printf("Seed: %d\n", seed);
    }

    final List<Loader> loaders = new ArrayList<>();

    void prepare() {
        List<String> c = new ArrayList<>(Arrays.asList(classNames));

        // Split classes between loading threads
        int count = (classNames.length / numThreads) + 1;
        for (int t = 0; t < numThreads; t++) {
            List<String> sel = new ArrayList<>();
            System.out.printf("Thread #%d:\n", t);
            for (int i = 0; i < count; i++) {
                if (c.size() == 0) break;
                int k = rand.nextInt(c.size());
                String elem = c.remove(k);
                sel.add(elem);
                System.out.printf("\t%s\n", elem);
            }
            loaders.add(new Loader(sel));
        }

        // Print diagnostic info when the test hangs
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                boolean alive = false;
                for (Loader l : loaders) {
                    if (!l.isAlive()) continue;
                    if (!alive) {
                        System.out.println("Some threads are still alive:");
                        alive = true;
                    }
                    System.out.println(l.getName());
                    for (StackTraceElement elem : l.getStackTrace()) {
                        System.out.println("\t" + elem.toString());
                    }
                }
            }
        });
    }

    public void run() throws Throwable {
        init();
        prepare();
        for (Loader loader : loaders) {
            loader.start();
        }
        l.await();
        for (Loader loader : loaders) {
            loader.join();
        }
    }

    class Loader extends Thread {
        List<String> classes;

        public Loader(List<String> classes) {
            this.classes = classes;
            setDaemon(true);
        }

        @Override
        public void run() {
            try {
                l.await();
                for (String name : classes) {
                    Class.forName(name).getName();
                }
            } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) {
                throw new Error(e);
            }
        }
    }

    final static String[] classNames = {
        "java.lang.invoke.AbstractValidatingLambdaMetafactory",
        "java.lang.invoke.BoundMethodHandle",
        "java.lang.invoke.CallSite",
        "java.lang.invoke.ConstantCallSite",
        "java.lang.invoke.DirectMethodHandle",
        "java.lang.invoke.InnerClassLambdaMetafactory",
        "java.lang.invoke.InvokeDynamic",
        "java.lang.invoke.InvokeGeneric",
        "java.lang.invoke.InvokerBytecodeGenerator",
        "java.lang.invoke.Invokers",
        "java.lang.invoke.LambdaConversionException",
        "java.lang.invoke.LambdaForm",
        "java.lang.invoke.LambdaMetafactory",
        "java.lang.invoke.MagicLambdaImpl",
        "java.lang.invoke.MemberName",
        "java.lang.invoke.MethodHandle",
        "java.lang.invoke.MethodHandleImpl",
        "java.lang.invoke.MethodHandleInfo",
        "java.lang.invoke.MethodHandleNatives",
        "java.lang.invoke.MethodHandleProxies",
        "java.lang.invoke.MethodHandles",
        "java.lang.invoke.MethodHandleStatics",
        "java.lang.invoke.MethodType",
        "java.lang.invoke.MethodTypeForm",
        "java.lang.invoke.MutableCallSite",
        "java.lang.invoke.SerializedLambda",
        "java.lang.invoke.SimpleMethodHandle",
        "java.lang.invoke.SwitchPoint",
        "java.lang.invoke.TypeConvertingMethodAdapter",
        "java.lang.invoke.VolatileCallSite",
        "java.lang.invoke.WrongMethodTypeException"
    };
}
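Run standalone, ConcurrentClassLoadingTest defaults to one loader thread per available processor and a random seed, both printed so a hang can be reproduced; they can be pinned with the flags parsed above, e.g. java ConcurrentClassLoadingTest -numThreads 8 -seed 42.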

Some files were not shown because too many files have changed in this diff.