commit 8f528fba93
Author: Lana Steuck
Date:   2018-01-18 18:58:46 +00:00

608 changed files with 7528 additions and 3300 deletions


@@ -3869,7 +3869,6 @@ void TemplateTable::_new() {
   Label done;
   Label initialize_header;
   Label initialize_object;  // including clearing the fields
-  Label allocate_shared;
 
   __ get_cpool_and_tags(rcx, rax);
@@ -3895,12 +3894,19 @@ void TemplateTable::_new() {
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);
 
-  //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  //    (creates a new TLAB, etc.)
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
+  //
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
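
Note: the rewritten comment above spells out the allocation flow the interpreter emits: TLAB first, otherwise the shared eden when inline contiguous allocation is supported, otherwise the slow path. As a rough sketch only (plain C++, not the generated assembly; tlab_allocate, eden_allocate, slow_path and initialize are hypothetical stand-ins, not HotSpot functions), the control flow corresponds to:

#include <cstddef>

// Hypothetical stand-ins for the generated fast paths and the runtime call.
void* tlab_allocate(size_t size_in_bytes);   // nullptr when the TLAB cannot fit the object
void* eden_allocate(size_t size_in_bytes);   // nullptr when the heap end is reached
void* slow_path(size_t size_in_bytes);       // calls into the runtime (new TLAB, GC, ...)
void  initialize(void* obj, size_t size_in_bytes);

extern const bool UseTLAB;
extern const bool allow_shared_alloc;

void* allocate_instance(size_t size_in_bytes) {
  if (UseTLAB) {
    if (void* obj = tlab_allocate(size_in_bytes)) {
      initialize(obj, size_in_bytes);   // clear fields, install header
      return obj;
    }
    return slow_path(size_in_bytes);    // TLAB failure goes straight to the slow path
  }
  if (allow_shared_alloc) {
    if (void* obj = eden_allocate(size_in_bytes)) {
      initialize(obj, size_in_bytes);
      return obj;
    }
    return slow_path(size_in_bytes);    // failed due to heap end
  }
  return slow_path(size_in_bytes);      // no fast path available
}
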
@@ -3916,7 +3922,7 @@ void TemplateTable::_new() {
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
       // the fields have been already cleared
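
Note: the TLAB fast path above is a plain bump-pointer allocation against the thread-local top/end fields; on overflow it now jumps straight to slow_case instead of falling through to the shared eden. A minimal C++ sketch of the same check, assuming a hypothetical top/end pair rather than the real JavaThread offsets:

#include <cstddef>

// Hypothetical per-thread TLAB state mirroring JavaThread's tlab_top/tlab_end fields.
struct ThreadLocalBuffer {
  char* top;   // next free byte
  char* end;   // first byte past the usable region
};

// Bump-pointer allocation in a thread-local buffer. No atomics are needed
// because only the owning thread updates top; when the object does not fit,
// the caller takes the slow path (slow_case above).
inline void* tlab_allocate(ThreadLocalBuffer& tlab, size_t size_in_bytes) {
  char* obj = tlab.top;                  // rax: object begin
  char* new_top = obj + size_in_bytes;   // rbx: object end
  if (new_top > tlab.end) {
    return nullptr;                      // does not fit: slow path
  }
  tlab.top = new_top;                    // publish the new top
  return obj;
}
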
@@ -3925,40 +3931,40 @@ void TemplateTable::_new() {
       // initialize both the header and fields
       __ jmp(initialize_object);
     }
-  }
-
-  // Allocation in the shared Eden, if allowed.
-  //
-  // rdx: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
-
-    Label retry;
-    __ bind(retry);
-    __ movptr(rax, heap_top);
-    __ lea(rbx, Address(rax, rdx, Address::times_1));
-    __ cmpptr(rbx, heap_end);
-    __ jcc(Assembler::above, slow_case);
-
-    // Compare rax, with the top addr, and if still equal, store the new
-    // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
-    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
-    //
-    // rax,: object begin
-    // rbx,: object end
-    // rdx: instance size in bytes
-    __ locked_cmpxchgptr(rbx, heap_top);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ jcc(Assembler::notEqual, retry);
-
-    __ incr_allocated_bytes(thread, rdx, 0);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // rdx: instance size in bytes
+    if (allow_shared_alloc) {
+      ExternalAddress heap_top((address)Universe::heap()->top_addr());
+      ExternalAddress heap_end((address)Universe::heap()->end_addr());
+
+      Label retry;
+      __ bind(retry);
+      __ movptr(rax, heap_top);
+      __ lea(rbx, Address(rax, rdx, Address::times_1));
+      __ cmpptr(rbx, heap_end);
+      __ jcc(Assembler::above, slow_case);
+
+      // Compare rax, with the top addr, and if still equal, store the new
+      // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
+      // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
+      //
+      // rax,: object begin
+      // rbx,: object end
+      // rdx: instance size in bytes
+      __ locked_cmpxchgptr(rbx, heap_top);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ jcc(Assembler::notEqual, retry);
+
+      __ incr_allocated_bytes(thread, rdx, 0);
+    }
+  }
+
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header. If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
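
Note: the shared-eden branch keeps the retry loop around the lock-prefixed cmpxchg described in the comment: load the top, bail out to the slow case past the heap end, and retry if another thread moved the top first. A rough C++ sketch of that loop, using std::atomic in place of locked_cmpxchgptr (SharedEden and its fields are illustrative, not HotSpot types):

#include <atomic>
#include <cstddef>

// Hypothetical shared-eden state mirroring Universe::heap()->top_addr()/end_addr().
struct SharedEden {
  std::atomic<char*> top;   // allocation top shared by all threads
  char* end;                // heap end
};

// CAS-based bump-pointer allocation mirroring the retry loop above:
// compare_exchange_weak plays the role of the lock-prefixed cmpxchg and
// succeeds only if top still holds the value we loaded; otherwise another
// thread won the race, obj is refreshed with the current top, and we retry.
inline void* eden_allocate(SharedEden& eden, size_t size_in_bytes) {
  char* obj = eden.top.load(std::memory_order_relaxed);
  for (;;) {
    char* new_top = obj + size_in_bytes;
    if (new_top > eden.end) {
      return nullptr;                    // heap end reached: slow path
    }
    if (eden.top.compare_exchange_weak(obj, new_top)) {
      return obj;                        // obj is the object begin
    }
  }
}
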