Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-27 14:54:52 +02:00)

commit 6a37cbc50c: Merge
228 changed files with 4289 additions and 2421 deletions
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 JVM_ActiveProcessorCount
 JVM_ArrayCopy
 JVM_AssertionStatusDirectives
+JVM_BeforeHalt
 JVM_CallStackWalk
 JVM_Clone
 JVM_ConstantPoolGetClassAt

@@ -206,6 +206,7 @@ SUNWprivate_1.1 {
 Java_java_lang_Runtime_totalMemory;
 Java_java_lang_Runtime_availableProcessors;
 Java_java_lang_SecurityManager_getClassContext;
+Java_java_lang_Shutdown_beforeHalt;
 Java_java_lang_Shutdown_halt0;
 Java_java_lang_StackTraceElement_initStackTraceElement;
 Java_java_lang_StackTraceElement_initStackTraceElements;

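The two export-map hunks above add matching entries for the new halt hook: JVM_BeforeHalt in the VM's symbol list and Java_java_lang_Shutdown_beforeHalt in the Java native library's map. The second name follows the standard JNI mangling ("Java_" + package and class with '.' replaced by '_' + "_" + method), and such a native conventionally does nothing but forward to the VM entry point. A minimal sketch of that convention, assuming only that jvm.h declares JVM_BeforeHalt(); the actual libjava implementation may differ in detail:

    #include "jni.h"
    #include "jvm.h"   // assumed to declare JVM_BeforeHalt()

    // Native behind java.lang.Shutdown.beforeHalt(): forwards to the VM.
    JNIEXPORT void JNICALL
    Java_java_lang_Shutdown_beforeHalt(JNIEnv *env, jclass ignored)
    {
        JVM_BeforeHalt();
    }

Both symbols therefore need exporting: JVM_BeforeHalt from libjvm and Java_java_lang_Shutdown_beforeHalt from libjava, which is exactly what the two map changes do.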
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
@@ -995,6 +995,7 @@ definitions %{
 
 source_hpp %{
 
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "opto/addnode.hpp"
 
@@ -4438,8 +4439,8 @@ encode %{
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
                Assembler::byte, /*acquire*/ false, /*release*/ true,
                /*weak*/ false, noreg);
   %}
 
 
   // The only difference between aarch64_enc_cmpxchg and
   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
@@ -5845,7 +5846,7 @@ operand immByteMapBase()
 %{
   // Get base of card map
   predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
-            (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+            (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
   match(ConP);
 
   op_cost(0);

@@ -2048,21 +2048,21 @@ public:
     starti;
     f(0,31), f((int)T & 1, 30);
     f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
-    f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
   }
   void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
              int imm, int op1, int op2) {
     starti;
     f(0,31), f((int)T & 1, 30);
     f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
-    f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
   }
   void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
              Register Xm, int op1, int op2) {
     starti;
     f(0,31), f((int)T & 1, 30);
     f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
-    f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
   }
 
   void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2) {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,6 +30,8 @@
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -42,6 +44,7 @@
 #include "runtime/vframeArray.hpp"
 #include "vmreg_aarch64.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -1162,10 +1165,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         // arg0: store_address
         Address store_addr(rfp, 2*BytesPerWord);
 
-        BarrierSet* bs = Universe::heap()->barrier_set();
-        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
         Label done;
         Label runtime;
 
@@ -1186,13 +1185,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         assert_different_registers(card_offset, byte_map_base, rscratch1);
 
         f.load_argument(0, card_offset);
-        __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
+        __ lsr(card_offset, card_offset, CardTable::card_shift);
         __ load_byte_map_base(byte_map_base);
         __ ldrb(rscratch1, Address(byte_map_base, card_offset));
-        __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
         __ br(Assembler::EQ, done);
 
-        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+        assert((int)CardTable::dirty_card_val() == 0, "must be 0");
 
         __ membar(Assembler::StoreLoad);
         __ ldrb(rscratch1, Address(byte_map_base, card_offset));

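The stub code in the hunks above turns a store address into a card address: shift the address right by the card shift, then index the byte map. The refactoring only changes where byte_map_base comes from (CardTable, reached through CardTableModRefBS::card_table()); the arithmetic is untouched. A minimal C++ sketch of that mapping, assuming the usual HotSpot card size of 512 bytes (card_shift == 9) and dirty_card_val() == 0; the real types live in gc/shared/cardTable.hpp:

    #include <cstdint>

    const int    card_shift     = 9;   // assumed: 2^9 = 512-byte cards
    const int8_t dirty_card_val = 0;   // assumed: matches CardTable::dirty_card_val()

    // byte_map_base is pre-biased so that indexing with (addr >> card_shift)
    // lands on the byte covering 'addr'; storing 0 marks the card dirty.
    inline void mark_card(int8_t* byte_map_base, const void* store_addr) {
      byte_map_base[reinterpret_cast<uintptr_t>(store_addr) >> card_shift] = dirty_card_val;
    }

The generated AArch64 code does the same with lsr plus ldrb/strb, and G1 additionally checks the card against g1_young_card_val() and re-reads it after a StoreLoad barrier.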
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,8 +29,9 @@
 #include "jvm.h"
 #include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
@@ -42,10 +43,12 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -1794,18 +1797,63 @@ int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
 
 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
   address prev = pc() - NativeMembar::instruction_size;
-  if (prev == code()->last_membar()) {
+  address last = code()->last_insn();
+  if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) {
     NativeMembar *bar = NativeMembar_at(prev);
     // We are merging two memory barrier instructions. On AArch64 we
     // can do this simply by ORing them together.
     bar->set_kind(bar->get_kind() | order_constraint);
     BLOCK_COMMENT("merged membar");
   } else {
-    code()->set_last_membar(pc());
+    code()->set_last_insn(pc());
     dmb(Assembler::barrier(order_constraint));
   }
 }
 
+bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
+  if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
+    merge_ldst(rt, adr, size_in_bytes, is_store);
+    code()->clear_last_insn();
+    return true;
+  } else {
+    assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8 bytes or 4 bytes load/store is supported.");
+    const unsigned mask = size_in_bytes - 1;
+    if (adr.getMode() == Address::base_plus_offset &&
+        (adr.offset() & mask) == 0) { // only supports base_plus_offset.
+      code()->set_last_insn(pc());
+    }
+    return false;
+  }
+}
+
+void MacroAssembler::ldr(Register Rx, const Address &adr) {
+  // We always try to merge two adjacent loads into one ldp.
+  if (!try_merge_ldst(Rx, adr, 8, false)) {
+    Assembler::ldr(Rx, adr);
+  }
+}
+
+void MacroAssembler::ldrw(Register Rw, const Address &adr) {
+  // We always try to merge two adjacent loads into one ldp.
+  if (!try_merge_ldst(Rw, adr, 4, false)) {
+    Assembler::ldrw(Rw, adr);
+  }
+}
+
+void MacroAssembler::str(Register Rx, const Address &adr) {
+  // We always try to merge two adjacent stores into one stp.
+  if (!try_merge_ldst(Rx, adr, 8, true)) {
+    Assembler::str(Rx, adr);
+  }
+}
+
+void MacroAssembler::strw(Register Rw, const Address &adr) {
+  // We always try to merge two adjacent stores into one stp.
+  if (!try_merge_ldst(Rw, adr, 4, true)) {
+    Assembler::strw(Rw, adr);
+  }
+}
+
 // MacroAssembler routines found actually to be needed
 
 void MacroAssembler::push(Register src)
@@ -2576,6 +2624,143 @@ Address MacroAssembler::spill_address(int size, int offset, Register tmp)
   return Address(base, offset);
 }
 
+// Checks whether offset is aligned.
+// Returns true if it is, else false.
+bool MacroAssembler::merge_alignment_check(Register base,
+                                           size_t size,
+                                           long cur_offset,
+                                           long prev_offset) const {
+  if (AvoidUnalignedAccesses) {
+    if (base == sp) {
+      // Checks whether low offset if aligned to pair of registers.
+      long pair_mask = size * 2 - 1;
+      long offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+      return (offset & pair_mask) == 0;
+    } else { // If base is not sp, we can't guarantee the access is aligned.
+      return false;
+    }
+  } else {
+    long mask = size - 1;
+    // Load/store pair instruction only supports element size aligned offset.
+    return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
+  }
+}
+
+// Checks whether current and previous loads/stores can be merged.
+// Returns true if it can be merged, else false.
+bool MacroAssembler::ldst_can_merge(Register rt,
+                                    const Address &adr,
+                                    size_t cur_size_in_bytes,
+                                    bool is_store) const {
+  address prev = pc() - NativeInstruction::instruction_size;
+  address last = code()->last_insn();
+
+  if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) {
+    return false;
+  }
+
+  if (adr.getMode() != Address::base_plus_offset || prev != last) {
+    return false;
+  }
+
+  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
+  size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
+
+  assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
+  assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
+
+  if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
+    return false;
+  }
+
+  long max_offset = 63 * prev_size_in_bytes;
+  long min_offset = -64 * prev_size_in_bytes;
+
+  assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
+
+  // Only same base can be merged.
+  if (adr.base() != prev_ldst->base()) {
+    return false;
+  }
+
+  long cur_offset = adr.offset();
+  long prev_offset = prev_ldst->offset();
+  size_t diff = abs(cur_offset - prev_offset);
+  if (diff != prev_size_in_bytes) {
+    return false;
+  }
+
+  // Following cases can not be merged:
+  // ldr x2, [x2, #8]
+  // ldr x3, [x2, #16]
+  // or:
+  // ldr x2, [x3, #8]
+  // ldr x2, [x3, #16]
+  // If t1 and t2 is the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
+  if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
+    return false;
+  }
+
+  long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+  // Offset range must be in ldp/stp instruction's range.
+  if (low_offset > max_offset || low_offset < min_offset) {
+    return false;
+  }
+
+  if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
+    return true;
+  }
+
+  return false;
+}
+
+// Merge current load/store with previous load/store into ldp/stp.
+void MacroAssembler::merge_ldst(Register rt,
+                                const Address &adr,
+                                size_t cur_size_in_bytes,
+                                bool is_store) {
+
+  assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
+
+  Register rt_low, rt_high;
+  address prev = pc() - NativeInstruction::instruction_size;
+  NativeLdSt* prev_ldst = NativeLdSt_at(prev);
+
+  long offset;
+
+  if (adr.offset() < prev_ldst->offset()) {
+    offset = adr.offset();
+    rt_low = rt;
+    rt_high = prev_ldst->target();
+  } else {
+    offset = prev_ldst->offset();
+    rt_low = prev_ldst->target();
+    rt_high = rt;
+  }
+
+  Address adr_p = Address(prev_ldst->base(), offset);
+  // Overwrite previous generated binary.
+  code_section()->set_end(prev);
+
+  const int sz = prev_ldst->size_in_bytes();
+  assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
+  if (!is_store) {
+    BLOCK_COMMENT("merged ldr pair");
+    if (sz == 8) {
+      ldp(rt_low, rt_high, adr_p);
+    } else {
+      ldpw(rt_low, rt_high, adr_p);
+    }
+  } else {
+    BLOCK_COMMENT("merged str pair");
+    if (sz == 8) {
+      stp(rt_low, rt_high, adr_p);
+    } else {
+      stpw(rt_low, rt_high, adr_p);
+    }
+  }
+}
+
 /**
  * Multiply 64 bit by 64 bit first loop.
  */
@@ -3433,16 +3618,16 @@ void MacroAssembler::store_check(Register obj) {
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension,
+  assert(bs->kind() == BarrierSet::CardTableModRef,
          "Wrong barrier set kind");
 
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
-  lsr(obj, obj, CardTableModRefBS::card_shift);
+  lsr(obj, obj, CardTable::card_shift);
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
+  assert(CardTable::dirty_card_val() == 0, "must be");
 
   load_byte_map_base(rscratch1);
 
@@ -3944,8 +4129,9 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                            DirtyCardQueue::byte_offset_of_buf()));
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
   Label done;
   Label runtime;
@@ -3962,20 +4148,20 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
 
   // storing region crossing non-NULL, is card already dirty?
 
-  ExternalAddress cardtable((address) ct->byte_map_base);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  ExternalAddress cardtable((address) ct->byte_map_base());
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   const Register card_addr = tmp;
 
-  lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
+  lsr(card_addr, store_addr, CardTable::card_shift);
 
   // get the address of the card
   load_byte_map_base(tmp2);
   add(card_addr, card_addr, tmp2);
   ldrb(tmp2, Address(card_addr));
-  cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmpw(tmp2, (int)G1CardTable::g1_young_card_val());
   br(Assembler::EQ, done);
 
-  assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+  assert((int)CardTable::dirty_card_val() == 0, "must be 0");
 
   membar(Assembler::StoreLoad);
 
@@ -4152,7 +4338,7 @@ void MacroAssembler::zero_memory(Register addr, Register len, Register t1) {
   bind(loop);
   sub(len, len, unroll);
   for (int i = -unroll; i < 0; i++)
-    str(zr, Address(t1, i * wordSize));
+    Assembler::str(zr, Address(t1, i * wordSize));
   bind(entry);
   add(t1, t1, unroll * wordSize);
   cbnz(len, loop);
@@ -4329,7 +4515,7 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byt
 
 void MacroAssembler::load_byte_map_base(Register reg) {
   jbyte *byte_map_base =
-    ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base;
+    ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base();
 
   if (is_valid_AArch64_address((address)byte_map_base)) {
     // Strictly speaking the byte_map_base isn't an address at all,

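The new try_merge_ldst path is a peephole: when a ldr/str is emitted immediately after a compatible one (same base register, same access size, adjacent element-aligned offsets, no pre/post-indexing, and for loads a destination that does not clash with the base or the previous target), the previous instruction is rewound and the pair is re-emitted as a single ldp/stp. A small standalone restatement of just the offset arithmetic from ldst_can_merge(), assuming size_in_bytes is 4 or 8; the register and encoding checks are omitted and the AvoidUnalignedAccesses refinement from merge_alignment_check() is folded into a single element-alignment test:

    #include <cstdlib>

    // Can two accesses of 'size_in_bytes' at these offsets form one ldp/stp slot pair?
    bool offsets_can_pair(long cur_offset, long prev_offset, long size_in_bytes) {
      // They must be the two adjacent slots of a single pair instruction.
      if (std::labs(cur_offset - prev_offset) != size_in_bytes) return false;

      // The low offset must fit the signed 7-bit scaled immediate of ldp/stp.
      long low = cur_offset < prev_offset ? cur_offset : prev_offset;
      if (low < -64 * size_in_bytes || low > 63 * size_in_bytes) return false;

      // Both offsets must be element-size aligned for the paired form.
      long mask = size_in_bytes - 1;
      return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
    }

So, for example, two 8-byte loads from [sp, #16] and [sp, #24] pair up into one ldp, while loads from [sp, #16] and [sp, #32] do not.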
@@ -150,11 +150,19 @@ class MacroAssembler: public Assembler {
 
   void bind(Label& L) {
     Assembler::bind(L);
-    code()->clear_last_membar();
+    code()->clear_last_insn();
   }
 
   void membar(Membar_mask_bits order_constraint);
 
+  using Assembler::ldr;
+  using Assembler::str;
+
+  void ldr(Register Rx, const Address &adr);
+  void ldrw(Register Rw, const Address &adr);
+  void str(Register Rx, const Address &adr);
+  void strw(Register Rx, const Address &adr);
+
   // Frame creation and destruction shared between JITs.
   void build_frame(int framesize);
   void remove_frame(int framesize);
@@ -1290,6 +1298,17 @@ private:
   // Uses rscratch2 if the address is not directly reachable
   Address spill_address(int size, int offset, Register tmp=rscratch2);
 
+  bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;
+
+  // Check whether two loads/stores can be merged into ldp/stp.
+  bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;
+
+  // Merge current load/store with previous load/store into ldp/stp.
+  void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);
+
+  // Try to merge two loads/stores into ldp/stp. If success, returns true else false.
+  bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);
+
 public:
   void spill(Register Rx, bool is64, int offset) {
     if (is64) {

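One detail in the header hunk above: declaring MacroAssembler::ldr(Register, const Address&) and the str/ldrw/strw variants would, by C++ name lookup rules, hide every inherited Assembler::ldr/str overload, so the using-declarations are needed to keep those overloads callable without qualification. A tiny self-contained illustration of that hiding rule, with made-up types standing in for the assembler classes:

    struct Base {
      void op(int)    {}   // stands in for the inherited Assembler::ldr overloads
      void op(double) {}
    };

    struct Derived : Base {
      using Base::op;           // keep the base overloads visible ...
      void op(const char*) {}   // ... while adding a new one, as the header does for ldr/str
    };

    int main() {
      Derived d;
      d.op(1);        // still resolves to Base::op(int) thanks to the using-declaration
      d.op("addr");   // the new overload
      return 0;
    }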
@@ -131,6 +131,13 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
     return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
            Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
   }
+
+  bool is_Imm_LdSt() {
+    unsigned int insn = uint_at(0);
+    return Instruction_aarch64::extract(insn, 29, 27) == 0b111 &&
+           Instruction_aarch64::extract(insn, 23, 23) == 0b0 &&
+           Instruction_aarch64::extract(insn, 26, 25) == 0b00;
+  }
 };
 
 inline NativeInstruction* nativeInstruction_at(address address) {
@@ -532,4 +539,57 @@ inline NativeMembar *NativeMembar_at(address addr) {
   return (NativeMembar*)addr;
 }
 
+class NativeLdSt : public NativeInstruction {
+private:
+  int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); }
+  // Check whether instruction is with unscaled offset.
+  bool is_ldst_ur() {
+    return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 ||
+            Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) &&
+           Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00;
+  }
+  bool is_ldst_unsigned_offset() {
+    return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 ||
+           Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100;
+  }
+public:
+  Register target() {
+    uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0);
+    return r == 0x1f ? zr : as_Register(r);
+  }
+  Register base() {
+    uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5);
+    return b == 0x1f ? sp : as_Register(b);
+  }
+  int64_t offset() {
+    if (is_ldst_ur()) {
+      return Instruction_aarch64::sextract(uint_at(0), 20, 12);
+    } else if (is_ldst_unsigned_offset()) {
+      return Instruction_aarch64::extract(uint_at(0), 21, 10) << size();
+    } else {
+      // others like: pre-index or post-index.
+      ShouldNotReachHere();
+      return 0;
+    }
+  }
+  size_t size_in_bytes() { return 1 << size(); }
+  bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
+  bool is_load() {
+    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
+           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");
+
+    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01;
+  }
+  bool is_store() {
+    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
+           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");
+
+    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00;
+  }
+};
+
+inline NativeLdSt *NativeLdSt_at(address addr) {
+  assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
+  return (NativeLdSt*)addr;
+}
 #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP

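NativeLdSt classifies and decodes an instruction purely by pulling bitfields out of its 32-bit encoding. A standalone sketch of the same style of extraction, assuming the inclusive [msb, lsb] convention that Instruction_aarch64::extract() uses; only the three field tests from is_Imm_LdSt() are mirrored here:

    #include <cstdint>

    // Extract bits msb..lsb (inclusive) of a 32-bit instruction word.
    inline uint32_t extract(uint32_t insn, int msb, int lsb) {
      int nbits = msb - lsb + 1;          // small ranges only; never a full 32-bit field here
      return (insn >> lsb) & ((1u << nbits) - 1u);
    }

    // Rough equivalent of NativeInstruction::is_Imm_LdSt() above.
    inline bool looks_like_imm_ldst(uint32_t insn) {
      return extract(insn, 29, 27) == 0b111 &&
             extract(insn, 26, 25) == 0b00  &&
             extract(insn, 23, 23) == 0b0;
    }

offset() then reads either the signed 9-bit field of the unscaled (ldur/stur-style) form or the unsigned 12-bit field scaled by the access size, which is exactly the distinction the two private predicates make.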
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/instanceOop.hpp"
@@ -652,9 +654,7 @@ class StubGenerator: public StubCodeGenerator {
           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
           __ pop(saved_regs, sp);
           break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default:
         ShouldNotReachHere();
@@ -695,16 +695,16 @@ class StubGenerator: public StubCodeGenerator {
           __ pop(saved_regs, sp);
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
 
-          __ lsr(start, start, CardTableModRefBS::card_shift);
-          __ lsr(end, end, CardTableModRefBS::card_shift);
+          __ lsr(start, start, CardTable::card_shift);
+          __ lsr(end, end, CardTable::card_shift);
           __ sub(end, end, start); // number of bytes to copy
 
           const Register count = end; // 'end' register contains bytes count now

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -184,8 +184,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         if (val == noreg) {
           __ store_heap_oop_null(obj);

@@ -193,7 +193,9 @@ void VM_Version::get_processor_features() {
   }
 
   // Enable vendor specific features
-  if (_cpu == CPU_CAVIUM) {
+
+  // ThunderX
+  if (_cpu == CPU_CAVIUM && (_model == 0xA1)) {
     if (_variant == 0) _features |= CPU_DMB_ATOMICS;
     if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
       FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
@@ -202,6 +204,20 @@ void VM_Version::get_processor_features() {
       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
     }
   }
+
+  // ThunderX2
+  if ((_cpu == CPU_CAVIUM && (_model == 0xAF)) ||
+      (_cpu == CPU_BROADCOM && (_model == 0x516))) {
+    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
+      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
+    }
+    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
+      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
+    }
+    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+    }
+  }
 
   if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
   // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)

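The new block keys ergonomics off the implementer and part-number fields read from the CPU identification registers: CPU_CAVIUM with model 0xAF, or CPU_BROADCOM with model 0x516, are both treated as ThunderX2-class parts (the design changed hands from Broadcom to Cavium, hence the two identities), and each affected flag is only raised if the user left it at its default. A hedged sketch of that "override only defaults" pattern, with simple stand-ins for the FLAG_IS_DEFAULT / FLAG_SET_DEFAULT macros:

    #include <cstdio>

    struct Flag { bool value; bool set_by_user; };

    // Plays the role of: if (FLAG_IS_DEFAULT(X)) FLAG_SET_DEFAULT(X, v);
    void set_default_if_untouched(Flag& f, bool v) {
      if (!f.set_by_user) {
        f.value = v;
      }
    }

    int main() {
      Flag avoid_unaligned = { false, false };
      Flag use_simd_memops = { false, true };  // e.g. user passed -XX:-UseSIMDForMemoryOps

      // Detected a ThunderX2-class part: raise defaults, respect explicit choices.
      set_default_if_untouched(avoid_unaligned, true);
      set_default_if_untouched(use_simd_memops, true);

      std::printf("%d %d\n", avoid_unaligned.value, use_simd_memops.value);  // prints "1 0"
      return 0;
    }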
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 #include "ci/ciArray.hpp"
 #include "ci/ciObjArrayKlass.hpp"
 #include "ci/ciTypeArrayKlass.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -475,22 +477,21 @@ void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp)
 }
 
 void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
-  assert(CardTableModRefBS::dirty_card_val() == 0,
+  assert(CardTable::dirty_card_val() == 0,
          "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
 #ifdef AARCH64
   // AARCH64 has a register that is constant zero. We can use that one to set the
   // value in the card table to dirty.
   __ move(FrameMap::ZR_opr, card_addr);
 #else // AARCH64
-  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
-  if(((intx)ct->byte_map_base & 0xff) == 0) {
+  if((ci_card_table_address_as<intx>() & 0xff) == 0) {
     // If the card table base address is aligned to 256 bytes, we can use the register
     // that contains the card_table_base_address.
     __ move(value, card_addr);
   } else {
     // Otherwise we need to create a register containing that value.
     LIR_Opr tmp_zero = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()), tmp_zero);
+    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
     __ move(tmp_zero, card_addr);
   }
 #endif // AARCH64
@@ -510,14 +511,14 @@ void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Co
   }
 
 #ifdef AARCH64
-  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
+  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
   LIR_Opr tmp2 = tmp;
-  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTableModRefBS::card_shift)
+  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
   LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
 #else
   // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
   // byte instruction does not support the addressing mode we need.
-  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BOOLEAN);
+  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
 #endif
   if (UseCondCardMark) {
     if (UseConcMarkSweepGC) {
@@ -527,7 +528,7 @@ void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Co
     __ move(card_addr, cur_value);
 
     LabelObj* L_already_dirty = new LabelObj();
-    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()));
+    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
     __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
     set_card(tmp, card_addr);
     __ branch_destination(L_already_dirty->label());

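The non-AArch64 branch of set_card() above relies on a small trick: dirty_card_val() is 0, so if the card-table base address is 256-byte aligned, the low byte of the register already holding that base is itself 0 and can be stored directly as the dirty value, avoiding an extra zero register or constant load. A one-line sketch of the alignment condition being tested (ci_card_table_address_as<intx>() is the new CI accessor used in the diff; a plain integer stands in for it here):

    #include <cstdint>

    // True when the low byte of the base equals the dirty value (assumed 0),
    // i.e. when storing the base register's low byte marks the card dirty.
    inline bool base_low_byte_is_dirty(intptr_t card_table_base, int dirty_card_val = 0) {
      return (card_table_base & 0xff) == dirty_card_val;
    }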
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,9 @@
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "utilities/align.hpp"
 #include "vmreg_arm.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -608,8 +612,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
       __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
 
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
       Label done;
       Label recheck;
       Label runtime;
@@ -619,8 +621,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       DirtyCardQueue::byte_offset_of_buf()));
 
-      AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
-      assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+      AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
 
       // save at least the registers that need saving if the runtime is called
 #ifdef AARCH64
@@ -649,12 +650,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       // explicitly specify that 'cardtable' has a relocInfo::none
      // type.
       __ lea(r_card_base_1, cardtable);
-      __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift));
+      __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
 
       // first quick check without barrier
       __ ldrb(r_tmp2, Address(r_card_addr_0));
 
-      __ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+      __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
       __ b(recheck, ne);
 
       __ bind(done);
@@ -675,14 +676,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       // reload card state after the barrier that ensures the stored oop was visible
       __ ldrb(r_tmp2, Address(r_card_addr_0));
 
-      assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+      assert(CardTable::dirty_card_val() == 0, "adjust this code");
       __ cbz(r_tmp2, done);
 
       // storing region crossing non-NULL, card is clean.
       // dirty card and log.
 
-      assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code");
-      if (((intptr_t)ct->byte_map_base & 0xff) == 0) {
+      assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
+      if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
        // Card table is aligned so the lowest byte of the table address base is zero.
         __ strb(r_card_base_1, Address(r_card_addr_0));
       } else {

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "jvm.h"
 #include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "interp_masm_arm.hpp"
@@ -410,12 +411,12 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
   // Check barrier set type (should be card table) and element size
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension,
+  assert(bs->kind() == BarrierSet::CardTableModRef,
          "Wrong barrier set kind");
 
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
 
   // Load card table base address.
 
@@ -433,19 +434,19 @@ void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
      rarely accessed area of thread descriptor).
   */
   // TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
-  mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
+  mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
 }
 
 // The 2nd part of the store check.
 void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
   assert_different_registers(obj, card_table_base, tmp);
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
+  assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
 #ifdef AARCH64
-  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift));
+  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
   Address card_table_addr(card_table_base);
 #else
-  Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift);
+  Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
 #endif
 
   if (UseCondCardMark) {
@@ -472,8 +473,9 @@ void InterpreterMacroAssembler::set_card(Register card_table_base, Address card_
 #ifdef AARCH64
   strb(ZR, card_table_addr);
 #else
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) {
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+  CardTable* ct = ctbs->card_table();
+  if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
     // Card table is aligned so the lowest byte of the table address base is zero.
     // This works only if the code is not saved for later use, possibly
     // in a context where the base would no longer be aligned.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "ci/ciEnv.hpp"
 #include "code/nativeInst.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -2265,7 +2267,8 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                            DirtyCardQueue::byte_offset_of_buf()));
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
   Label done;
   Label runtime;
 
@@ -2286,18 +2289,18 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
 
   // storing region crossing non-NULL, is card already dirty?
   const Register card_addr = tmp1;
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
-  mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
-  add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift));
+  mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
+  add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));
 
   ldrb(tmp2, Address(card_addr));
-  cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmp(tmp2, (int)G1CardTable::g1_young_card_val());
   b(done, eq);
 
   membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+  assert(CardTable::dirty_card_val() == 0, "adjust this code");
   ldrb(tmp2, Address(card_addr));
   cbz(tmp2, done);
 
@@ -3023,7 +3026,6 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
 }
 
 #endif // COMPILER2
-
 // Must preserve condition codes, or C2 encodeKlass_not_null rule
 // must be changed.
 void MacroAssembler::encode_klass_not_null(Register r) {
@@ -3261,4 +3263,3 @@ void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch
 
 }
 #endif // COMPILER2
-

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
 #include "assembler_arm.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/instanceOop.hpp"
@@ -2907,8 +2909,7 @@ class StubGenerator: public StubCodeGenerator {
 __ pop(saved_regs | R9ifScratched);
 #endif // AARCH64
 }
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 break;
 default:
 ShouldNotReachHere();
@@ -2961,12 +2962,12 @@ class StubGenerator: public StubCodeGenerator {
 #endif // !AARCH64
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 BLOCK_COMMENT("CardTablePostBarrier");
-CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+CardTable* ct = ctbs->card_table();
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

 Label L_cardtable_loop, L_done;

@@ -2975,12 +2976,12 @@ class StubGenerator: public StubCodeGenerator {
 __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
 __ sub(count, count, BytesPerHeapOop); // last addr

-__ logical_shift_right(addr, addr, CardTableModRefBS::card_shift);
-__ logical_shift_right(count, count, CardTableModRefBS::card_shift);
+__ logical_shift_right(addr, addr, CardTable::card_shift);
+__ logical_shift_right(count, count, CardTable::card_shift);
 __ sub(count, count, addr); // nb of cards

 // warning: Rthread has not been preserved
-__ mov_address(tmp, (address) ct->byte_map_base, symbolic_Relocation::card_table_reference);
+__ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
 __ add(addr,tmp, addr);

 Register zero = __ zero_register(tmp);
@@ -2992,8 +2993,6 @@ class StubGenerator: public StubCodeGenerator {
 __ BIND(L_done);
 }
 break;
-case BarrierSet::ModRef:
-break;
 default:
 ShouldNotReachHere();
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -228,8 +228,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 if (is_null) {
 __ store_heap_oop_null(new_val, obj);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "utilities/macros.hpp"
 #include "vmreg_ppc.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif

@@ -795,7 +799,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 Register tmp = R0;
 Register addr = R14;
 Register tmp2 = R15;
-jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+jbyte* byte_map_base = ci_card_table_address();

 Label restart, refill, ret;

@@ -803,26 +807,26 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 __ std(addr, -8, R1_SP);
 __ std(tmp2, -16, R1_SP);

-__ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
+__ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
 __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
 __ add(addr, tmp2, addr);
 __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

 // Return if young card.
-__ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
+__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
 __ beq(CCR0, ret);

 // Return if sequential consistent value is already dirty.
 __ membar(Assembler::StoreLoad);
 __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

-__ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
+__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
 __ beq(CCR0, ret);

 // Not dirty.

 // First, dirty it.
-__ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
+__ li(tmp, G1CardTable::dirty_card_val());
 __ stb(tmp, 0, addr);

 int dirty_card_q_index_byte_offset =
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -32,6 +32,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/monitorChunk.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -3036,20 +3038,20 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
 CardTableModRefBS* bs =
 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-assert(bs->kind() == BarrierSet::CardTableForRS ||
-bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+CardTable* ct = bs->card_table();
 #ifdef ASSERT
 cmpdi(CCR0, Rnew_val, 0);
 asm_assert_ne("null oop not allowed", 0x321);
 #endif
-card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
+card_table_write(ct->byte_map_base(), Rtmp, Rstore_addr);
 }

 // Write the card table byte.
 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
 assert_different_registers(Robj, Rtmp, R0);
 load_const_optimized(Rtmp, (address)byte_map_base, R0);
-srdi(Robj, Robj, CardTableModRefBS::card_shift);
+srdi(Robj, Robj, CardTable::card_shift);
 li(R0, 0); // dirty
 if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
 stbx(R0, Rtmp, Robj);
@@ -3171,6 +3173,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v

 G1SATBCardTableLoggingModRefBS* bs =
 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+CardTable* ct = bs->card_table();

 // Does store cross heap regions?
 if (G1RSBarrierRegionFilter) {
@@ -3187,26 +3190,26 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
 #endif

 // Storing region crossing non-NULL, is card already dirty?
-assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 const Register Rcard_addr = Rtmp1;
 Register Rbase = Rtmp2;
-load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
+load_const_optimized(Rbase, (address)ct->byte_map_base(), /*temp*/ Rtmp3);

-srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
+srdi(Rcard_addr, Rstore_addr, CardTable::card_shift);

 // Get the address of the card.
 lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
-cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+cmpwi(CCR0, Rtmp3, (int)G1CardTable::g1_young_card_val());
 beq(CCR0, filtered);

 membar(Assembler::StoreLoad);
 lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
-cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
+cmpwi(CCR0, Rtmp3 /* card value */, CardTable::dirty_card_val());
 beq(CCR0, filtered);

 // Storing a region crossing, non-NULL oop, card is clean.
 // Dirty card and log.
-li(Rtmp3, CardTableModRefBS::dirty_card_val());
+li(Rtmp3, CardTable::dirty_card_val());
 //release(); // G1: oops are allowed to get visible after dirty marking.
 stbx(Rtmp3, Rbase, Rcard_addr);

@@ -25,6 +25,8 @@

 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
 #include "oops/instanceOop.hpp"
@@ -667,9 +669,7 @@ class StubGenerator: public StubCodeGenerator {
 __ bind(filtered);
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
-case BarrierSet::ModRef:
+case BarrierSet::CardTableModRef:
 break;
 default:
 ShouldNotReachHere();
@@ -703,8 +703,7 @@ class StubGenerator: public StubCodeGenerator {
 __ restore_LR_CR(R0);
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 Label Lskip_loop, Lstore_loop;
 if (UseConcMarkSweepGC) {
@@ -712,19 +711,20 @@ class StubGenerator: public StubCodeGenerator {
 __ release();
 }

-CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
-assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+CardTable* const ct = ctbs->card_table();
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 assert_different_registers(addr, count, tmp);

 __ sldi(count, count, LogBytesPerHeapOop);
 __ addi(count, count, -BytesPerHeapOop);
 __ add(count, addr, count);
 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
-__ srdi(addr, addr, CardTableModRefBS::card_shift);
-__ srdi(count, count, CardTableModRefBS::card_shift);
+__ srdi(addr, addr, CardTable::card_shift);
+__ srdi(count, count, CardTable::card_shift);
 __ subf(count, addr, count);
 assert_different_registers(R0, addr, count, tmp);
-__ load_const(tmp, (address)ct->byte_map_base);
+__ load_const(tmp, (address)ct->byte_map_base());
 __ addic_(count, count, 1);
 __ beq(CCR0, Lskip_loop);
 __ li(R0, 0);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -103,8 +103,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 Label Lnull, Ldone;
 if (Rval != noreg) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_s390.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "vmreg_s390.inline.hpp"
 #include "registerSaver_s390.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif

@@ -845,7 +849,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 Register r1 = Z_R6; // Must be saved/restored.
 Register r2 = Z_R7; // Must be saved/restored.
 Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
-jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+jbyte* byte_map_base = ci_card_table_address();

 // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
 __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
@@ -854,17 +858,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

 // Calculate address of card corresponding to the updated oop slot.
 AddressLiteral rs(byte_map_base);
-__ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift);
+__ z_srlg(addr_card, addr_oop, CardTable::card_shift);
 addr_oop = noreg; // dead now
 __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
 __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

-__ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+__ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
 __ z_bre(young_card);

 __ z_sync(); // Required to support concurrent cleaning.

-__ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val());
+__ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
 __ z_brne(not_already_dirty);

 __ bind(young_card);
@@ -877,7 +881,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 __ bind(not_already_dirty);

 // First, dirty it: [addr_card] := 0
-__ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val());
+__ z_mvi(0, addr_card, CardTable::dirty_card_val());

 Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
 Register buf = r2;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -27,6 +27,7 @@
 #include "asm/codeBuffer.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
@@ -50,6 +51,7 @@
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -3502,12 +3504,13 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg

 // Write to card table for modification at store_addr - register is destroyed afterwards.
 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
-CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
-assert(bs->kind() == BarrierSet::CardTableForRS ||
-bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+BarrierSet* bs = Universe::heap()->barrier_set();
+CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+CardTable* ct = ctbs->card_table();
+assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
 assert_different_registers(store_addr, tmp);
-z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
-load_absolute_address(tmp, (address)bs->byte_map_base);
+z_srlg(store_addr, store_addr, CardTable::card_shift);
+load_absolute_address(tmp, (address)ct->byte_map_base());
 z_agr(store_addr, tmp);
 z_mvi(0, store_addr, 0); // Store byte 0.
 }
@@ -3707,6 +3710,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.

 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+CardTable* ct = bs->card_table();
 assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");

 BLOCK_COMMENT("g1_write_barrier_post {");
@@ -3733,33 +3737,33 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
 Rnew_val = noreg; // end of lifetime

 // Storing region crossing non-NULL, is card already dirty?
-assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
 // Make sure not to use Z_R0 for any of these registers.
 Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
 Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;

 // calculate address of card
-load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base.
-z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
+load_const_optimized(Rbase, (address)ct->byte_map_base()); // Card table base.
+z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift); // Index into card table.
 z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli.
 Rbase = noreg; // end of lifetime

 // Filter young.
-assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
-z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code");
+z_cli(0, Rcard_addr, (int)G1CardTable::g1_young_card_val());
 z_bre(filtered);

 // Check the card value. If dirty, we're done.
 // This also avoids false sharing of the (already dirty) card.
 z_sync(); // Required to support concurrent cleaning.
-assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
-z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
+assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code");
+z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar.
 z_bre(filtered);

 // Storing a region crossing, non-NULL oop, card is clean.
 // Dirty card and log.
-z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
+z_mvi(0, Rcard_addr, CardTable::dirty_card_val());

 Register Rcard_addr_x = Rcard_addr;
 Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "registerSaver_s390.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "nativeInst_s390.hpp"
@@ -722,8 +724,7 @@ class StubGenerator: public StubCodeGenerator {
 __ bind(filtered);
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 case BarrierSet::ModRef:
 break;
 default:
@@ -761,14 +762,14 @@ class StubGenerator: public StubCodeGenerator {
 }
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 // These cases formerly known as
 // void array_store_check(Register addr, Register count, bool branchToEnd).
 {
 NearLabel doXC, done;
-CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+CardTable* ct = ctbs->card_table();
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 assert_different_registers(Z_R0, Z_R1, addr, count);

 // Nothing to do if count <= 0.
@@ -787,11 +788,11 @@ class StubGenerator: public StubCodeGenerator {
 __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);

 // Get base address of card table.
-__ load_const_optimized(Z_R1, (address)ct->byte_map_base);
+__ load_const_optimized(Z_R1, (address)ct->byte_map_base());

 // count = (count>>shift) - (addr>>shift)
-__ z_srlg(addr, addr, CardTableModRefBS::card_shift);
-__ z_srlg(count, count, CardTableModRefBS::card_shift);
+__ z_srlg(addr, addr, CardTable::card_shift);
+__ z_srlg(count, count, CardTable::card_shift);

 // Prefetch first elements of card table for update.
 if (VM_Version::has_Prefetch()) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -260,8 +260,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 if (val_is_null) {
 __ store_heap_oop_null(val, offset, base);
@@ -35,6 +35,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/sharedRuntime.hpp"

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -38,6 +41,7 @@
 #include "utilities/align.hpp"
 #include "vmreg_sparc.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif

@@ -843,22 +847,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 Register cardtable = G5;
 Register tmp = G1_scratch;
 Register tmp2 = G3_scratch;
-jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
+jbyte* byte_map_base = ci_card_table_address();

 Label not_already_dirty, restart, refill, young_card;

-__ srlx(addr, CardTableModRefBS::card_shift, addr);
+__ srlx(addr, CardTable::card_shift, addr);

 AddressLiteral rs(byte_map_base);
 __ set(rs, cardtable); // cardtable := <card table base>
 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

-__ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+__ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

-assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

 __ bind(young_card);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "jvm.h"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -35,6 +36,7 @@
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -44,6 +46,7 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -658,7 +661,7 @@ void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index)

 void MacroAssembler::card_table_write(jbyte* byte_map_base,
 Register tmp, Register obj) {
-srlx(obj, CardTableModRefBS::card_shift, obj);
+srlx(obj, CardTable::card_shift, obj);
 assert(tmp != obj, "need separate temp reg");
 set((address) byte_map_base, tmp);
 stb(G0, tmp, obj);
@@ -3574,17 +3577,17 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {

 Label not_already_dirty, restart, refill, young_card;

-__ srlx(O0, CardTableModRefBS::card_shift, O0);
+__ srlx(O0, CardTable::card_shift, O0);
 AddressLiteral addrlit(byte_map_base);
 __ set(addrlit, O1); // O1 := <card table base>
 __ ldub(O0, O1, O2); // O2 := [O0 + O1]

-__ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+__ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
 __ ldub(O0, O1, O2); // O2 := [O0 + O1]

-assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

 __ bind(young_card);
@@ -3664,6 +3667,7 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val

 G1SATBCardTableLoggingModRefBS* bs =
 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+CardTable* ct = bs->card_table();

 if (G1RSBarrierRegionFilter) {
 xor3(store_addr, new_val, tmp);
@@ -3704,7 +3708,8 @@ void g1_barrier_stubs_init() {
 if (dirty_card_log_enqueue == 0) {
 G1SATBCardTableLoggingModRefBS* bs =
 barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
-generate_dirty_card_log_enqueue(bs->byte_map_base);
+CardTable *ct = bs->card_table();
+generate_dirty_card_log_enqueue(ct->byte_map_base());
 assert(dirty_card_log_enqueue != 0, "postcondition.");
 }
 if (satb_log_enqueue_with_frame == 0) {
@@ -3726,9 +3731,10 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_v
 if (new_val == G0) return;
 CardTableModRefBS* bs =
 barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-assert(bs->kind() == BarrierSet::CardTableForRS ||
-bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
-card_table_write(bs->byte_map_base, tmp, store_addr);
+CardTable* ct = bs->card_table();
+assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+card_table_write(ct->byte_map_base(), tmp, store_addr);
 }

 // ((OopHandle)result).resolve();
@@ -24,6 +24,8 @@

 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/instanceOop.hpp"
@@ -875,9 +877,7 @@ class StubGenerator: public StubCodeGenerator {
 DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
-case BarrierSet::ModRef:
+case BarrierSet::CardTableModRef:
 break;
 default:
 ShouldNotReachHere();
@@ -908,11 +908,11 @@ class StubGenerator: public StubCodeGenerator {
 __ restore();
 }
 break;
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
-CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+CardTable* ct = ctbs->card_table();
+assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 assert_different_registers(addr, count, tmp);

 Label L_loop, L_done;
@@ -923,10 +923,10 @@ class StubGenerator: public StubCodeGenerator {
 __ sub(count, BytesPerHeapOop, count);
 __ add(count, addr, count);
 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
-__ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
-__ srl_ptr(count, CardTableModRefBS::card_shift, count);
+__ srl_ptr(addr, CardTable::card_shift, addr);
+__ srl_ptr(count, CardTable::card_shift, count);
 __ sub(count, addr, count);
-AddressLiteral rs(ct->byte_map_base);
+AddressLiteral rs(ct->byte_map_base());
 __ set(rs, tmp);
 __ BIND(L_loop);
 __ stb(G0, tmp, addr);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
 }
 break;
 #endif // INCLUDE_ALL_GCS
-case BarrierSet::CardTableForRS:
-case BarrierSet::CardTableExtension:
+case BarrierSet::CardTableModRef:
 {
 if (index == noreg ) {
 assert(Assembler::is_simm13(offset), "fix this code");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_x86.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -39,6 +42,7 @@
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif

@@ -1632,10 +1636,6 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 // arg0: store_address
 Address store_addr(rbp, 2*BytesPerWord);

-CardTableModRefBS* ct =
-barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
 Label done;
 Label enqueued;
 Label runtime;
@@ -1657,25 +1657,25 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 const Register card_addr = rcx;

 f.load_argument(0, card_addr);
-__ shrptr(card_addr, CardTableModRefBS::card_shift);
+__ shrptr(card_addr, CardTable::card_shift);
 // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
 // a valid address and therefore is not properly handled by the relocation code.
-__ movptr(cardtable, (intptr_t)ct->byte_map_base);
+__ movptr(cardtable, ci_card_table_address_as<intptr_t>());
 __ addptr(card_addr, cardtable);

 NOT_LP64(__ get_thread(thread);)

-__ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+__ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
 __ jcc(Assembler::equal, done);

 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
-__ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+__ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
 __ jcc(Assembler::equal, done);

 // storing region crossing non-NULL, card is clean.
 // dirty card and log.

-__ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+__ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());

 const Register tmp = rdx;
 __ push(rdx);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -346,8 +346,9 @@ class SlowSignatureHandler
 _from -= Interpreter::stackElementSize;

 if (_num_args < Argument::n_float_register_parameters_c-1) {
+assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
 *_reg_args++ = from_obj;
-*_fp_identifiers |= (intptr_t)(0x01 << (_num_args*2)); // mark as float
+*_fp_identifiers |= ((intptr_t)0x01 << (_num_args*2)); // mark as float
 _num_args++;
 } else {
 *_to++ = from_obj;
@@ -360,8 +361,9 @@ class SlowSignatureHandler
 _from -= 2*Interpreter::stackElementSize;

 if (_num_args < Argument::n_float_register_parameters_c-1) {
+assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
 *_reg_args++ = from_obj;
-*_fp_identifiers |= (intptr_t)(0x3 << (_num_args*2)); // mark as double
+*_fp_identifiers |= ((intptr_t)0x3 << (_num_args*2)); // mark as double
 _num_args++;
 } else {
 *_to++ = from_obj;
@ -27,6 +27,7 @@
|
||||||
#include "asm/assembler.hpp"
|
#include "asm/assembler.hpp"
|
||||||
#include "asm/assembler.inline.hpp"
|
#include "asm/assembler.inline.hpp"
|
||||||
#include "compiler/disassembler.hpp"
|
#include "compiler/disassembler.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
#include "gc/shared/cardTableModRefBS.hpp"
|
#include "gc/shared/cardTableModRefBS.hpp"
|
||||||
#include "gc/shared/collectedHeap.inline.hpp"
|
#include "gc/shared/collectedHeap.inline.hpp"
|
||||||
#include "interpreter/interpreter.hpp"
|
#include "interpreter/interpreter.hpp"
|
||||||
|
@ -45,6 +46,7 @@
|
||||||
#include "runtime/thread.hpp"
|
#include "runtime/thread.hpp"
|
||||||
#include "utilities/macros.hpp"
|
#include "utilities/macros.hpp"
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
|
#include "gc/g1/g1CardTable.hpp"
|
||||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||||
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
|
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
|
||||||
#include "gc/g1/heapRegion.hpp"
|
#include "gc/g1/heapRegion.hpp"
|
||||||
|
@ -5407,9 +5409,10 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
|
||||||
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));

CardTableModRefBS* ct =
CardTableModRefBS* ctbs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

Label done;
Label runtime;

@ -5432,24 +5435,24 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
const Register cardtable = tmp2;

movptr(card_addr, store_addr);
shrptr(card_addr, CardTableModRefBS::card_shift);
shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
movptr(cardtable, (intptr_t)ct->byte_map_base);
movptr(cardtable, (intptr_t)ct->byte_map_base());
addptr(card_addr, cardtable);

cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
jcc(Assembler::equal, done);

membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
jcc(Assembler::equal, done);


// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.

movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());

cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);

@ -5494,14 +5497,14 @@ void MacroAssembler::store_check(Register obj) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableForRS ||
assert(bs->kind() == BarrierSet::CardTableModRef,
bs->kind() == BarrierSet::CardTableExtension,
"Wrong barrier set kind");

CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

shrptr(obj, CardTableModRefBS::card_shift);
shrptr(obj, CardTable::card_shift);

Address card_addr;

@ -5510,7 +5513,7 @@ void MacroAssembler::store_check(Register obj) {
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t disp = (intptr_t) ct->byte_map_base;
intptr_t disp = (intptr_t) ct->byte_map_base();
if (is_simm32(disp)) {
card_addr = Address(noreg, obj, Address::times_1, disp);
} else {

@ -5518,12 +5521,12 @@ void MacroAssembler::store_check(Register obj) {
// displacement and done in a single instruction given favorable mapping and a
// smarter version of as_Address. However, 'ExternalAddress' generates a relocation
// entry and that entry is not properly handled by the relocation code.
AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none);
Address index(noreg, obj, Address::times_1);
card_addr = as_Address(ArrayAddress(cardtable, index));
}

int dirty = CardTableModRefBS::dirty_card_val();
int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
if (UseConcMarkSweepGC) {

@ -25,6 +25,8 @@
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
#include "asm/macroAssembler.hpp"
|
#include "asm/macroAssembler.hpp"
|
||||||
#include "asm/macroAssembler.inline.hpp"
|
#include "asm/macroAssembler.inline.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
|
#include "gc/shared/cardTableModRefBS.hpp"
|
||||||
#include "interpreter/interpreter.hpp"
|
#include "interpreter/interpreter.hpp"
|
||||||
#include "nativeInst_x86.hpp"
|
#include "nativeInst_x86.hpp"
|
||||||
#include "oops/instanceOop.hpp"
|
#include "oops/instanceOop.hpp"
|
||||||
|
@ -705,9 +707,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
case BarrierSet::CardTableForRS:
|
case BarrierSet::CardTableModRef:
|
||||||
case BarrierSet::CardTableExtension:
|
|
||||||
case BarrierSet::ModRef:
|
|
||||||
break;
|
break;
|
||||||
default :
|
default :
|
||||||
ShouldNotReachHere();
|
ShouldNotReachHere();
|
||||||
|
@ -739,22 +739,22 @@ class StubGenerator: public StubCodeGenerator {
|
||||||
break;
#endif // INCLUDE_ALL_GCS

case BarrierSet::CardTableForRS:
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

Label L_loop;
const Register end = count; // elements count; end == start+count-1
assert_different_registers(start, end);

__ lea(end, Address(start, count, Address::times_ptr, -wordSize));
__ shrptr(start, CardTableModRefBS::card_shift);
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTableModRefBS::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> count
__ BIND(L_loop);
intptr_t disp = (intptr_t) ct->byte_map_base;
intptr_t disp = (intptr_t) ct->byte_map_base();
Address cardtable(start, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);

@ -25,6 +25,9 @@
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
#include "asm/macroAssembler.hpp"
|
#include "asm/macroAssembler.hpp"
|
||||||
#include "asm/macroAssembler.inline.hpp"
|
#include "asm/macroAssembler.inline.hpp"
|
||||||
|
#include "ci/ciUtilities.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
|
#include "gc/shared/cardTableModRefBS.hpp"
|
||||||
#include "interpreter/interpreter.hpp"
|
#include "interpreter/interpreter.hpp"
|
||||||
#include "nativeInst_x86.hpp"
|
#include "nativeInst_x86.hpp"
|
||||||
#include "oops/instanceOop.hpp"
|
#include "oops/instanceOop.hpp"
|
||||||
|
@ -1232,9 +1235,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||||
__ bind(filtered);
|
__ bind(filtered);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case BarrierSet::CardTableForRS:
|
case BarrierSet::CardTableModRef:
|
||||||
case BarrierSet::CardTableExtension:
|
|
||||||
case BarrierSet::ModRef:
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ShouldNotReachHere();
|
ShouldNotReachHere();
|
||||||
|
@ -1272,12 +1273,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||||
__ popa();
}
break;
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

Label L_loop, L_done;
const Register end = count;

@ -1286,11 +1283,11 @@ class StubGenerator: public StubCodeGenerator {

__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
__ shrptr(start, CardTableModRefBS::card_shift);
__ shrptr(start, CardTable::card_shift);
__ shrptr(end, CardTableModRefBS::card_shift);
__ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> cards count

int64_t disp = (int64_t) ct->byte_map_base;
int64_t disp = ci_card_table_address_as<int64_t>();
__ mov64(scratch, disp);
__ addptr(start, scratch);
__ BIND(L_loop);

@ -198,8 +198,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
__ store_heap_oop_null(obj);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@ -41,6 +41,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

@ -414,9 +414,9 @@ void OSContainer::init() {

}

char * OSContainer::container_type() {
const char * OSContainer::container_type() {
if (is_containerized()) {
return (char *)"cgroupv1";
return "cgroupv1";
} else {
return NULL;
}

@ -40,7 +40,7 @@ class OSContainer: AllStatic {
public:
static void init();
static inline bool is_containerized();
static char * container_type();
static const char * container_type();

static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();

@ -177,20 +177,17 @@ julong os::Linux::available_memory() {

if (OSContainer::is_containerized()) {
jlong mem_limit, mem_usage;
if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
if ((mem_usage = OSContainer::memory_usage_in_bytes()) > 0) {
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
if (mem_limit > mem_usage) {
mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
avail_mem = (julong)mem_limit - (julong)mem_usage;
}
} else {
if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
avail_mem = 0;
log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
}
log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
if (mem_limit > 0 && mem_usage > 0 ) {
return avail_mem;
avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
} else {
log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
log_debug(os,container)("container memory usage call failed: " JLONG_FORMAT, mem_usage);
return avail_mem;
}
} else {
log_debug(os,container)("container memory unlimited or failed: " JLONG_FORMAT, mem_limit);
}
}
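Note on the rewritten hunk above: the new code prefers the cgroup limit but falls back to the host value whenever the limit or usage query fails or reports unlimited. A condensed, illustrative sketch of that decision flow follows; host_available() is a placeholder for the existing /proc/meminfo path and is not part of this change.

    // Illustrative sketch of the fallback behaviour described above (not actual JDK code).
    static julong available_with_container_fallback() {
      julong avail = host_available();                       // placeholder for the /proc based path
      if (OSContainer::is_containerized()) {
        jlong limit = OSContainer::memory_limit_in_bytes();
        jlong usage = (limit > 0) ? OSContainer::memory_usage_in_bytes() : -1;
        if (limit > 0 && usage > 0) {
          // container values are trusted only when both queries succeed
          avail = limit > usage ? (julong)(limit - usage) : 0;
        }
        // otherwise keep the host value, as the log messages above indicate
      }
      return avail;
    }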

@ -201,22 +198,18 @@ julong os::Linux::available_memory() {
}

julong os::physical_memory() {
jlong phys_mem = 0;
if (OSContainer::is_containerized()) {
jlong mem_limit;
if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
return (julong)mem_limit;
return phys_mem;
} else {
if (mem_limit == OSCONTAINER_ERROR) {
log_debug(os,container)("container memory limit call failed");
}
if (mem_limit == -1) {
log_debug(os,container)("container memory unlimited, using host value");
}
}
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
}

jlong phys_mem = Linux::physical_memory();
phys_mem = Linux::physical_memory();
log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
return phys_mem;
}

@ -2135,63 +2128,54 @@ void os::Linux::print_full_memory_info(outputStream* st) {
}

void os::Linux::print_container_info(outputStream* st) {
if (OSContainer::is_containerized()) {
if (!OSContainer::is_containerized()) {
st->print("container (cgroup) information:\n");
return;
}

char *p = OSContainer::container_type();
if (p == NULL)
st->print("container_type() failed\n");
else {
st->print("container_type: %s\n", p);
}

p = OSContainer::cpu_cpuset_cpus();
if (p == NULL)
st->print("cpu_cpuset_cpus() failed\n");
else {
st->print("cpu_cpuset_cpus: %s\n", p);
free(p);
}

p = OSContainer::cpu_cpuset_memory_nodes();
if (p < 0)
st->print("cpu_memory_nodes() failed\n");
else {
st->print("cpu_memory_nodes: %s\n", p);
free(p);
}

int i = OSContainer::active_processor_count();
if (i < 0)
st->print("active_processor_count() failed\n");
else
st->print("active_processor_count: %d\n", i);

i = OSContainer::cpu_quota();
st->print("cpu_quota: %d\n", i);

i = OSContainer::cpu_period();
st->print("cpu_period: %d\n", i);

i = OSContainer::cpu_shares();
st->print("cpu_shares: %d\n", i);

jlong j = OSContainer::memory_limit_in_bytes();
st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::memory_and_swap_limit_in_bytes();
st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::memory_soft_limit_in_bytes();
st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::OSContainer::memory_usage_in_bytes();
st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::OSContainer::memory_max_usage_in_bytes();
st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
st->cr();
}

st->print("container (cgroup) information:\n");

const char *p_ct = OSContainer::container_type();
st->print("container_type: %s\n", p_ct != NULL ? p_ct : "failed");

char *p = OSContainer::cpu_cpuset_cpus();
st->print("cpu_cpuset_cpus: %s\n", p != NULL ? p : "failed");
free(p);

p = OSContainer::cpu_cpuset_memory_nodes();
st->print("cpu_memory_nodes: %s\n", p != NULL ? p : "failed");
free(p);

int i = OSContainer::active_processor_count();
if (i > 0) {
st->print("active_processor_count: %d\n", i);
} else {
st->print("active_processor_count: failed\n");
}

i = OSContainer::cpu_quota();
st->print("cpu_quota: %d\n", i);

i = OSContainer::cpu_period();
st->print("cpu_period: %d\n", i);

i = OSContainer::cpu_shares();
st->print("cpu_shares: %d\n", i);

jlong j = OSContainer::memory_limit_in_bytes();
st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::memory_and_swap_limit_in_bytes();
st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::memory_soft_limit_in_bytes();
st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::OSContainer::memory_usage_in_bytes();
st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);

j = OSContainer::OSContainer::memory_max_usage_in_bytes();
st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
st->cr();
}

void os::print_memory_info(outputStream* st) {

@ -3069,10 +3053,12 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
return res != (uintptr_t) MAP_FAILED;
}

static address get_stack_commited_bottom(address bottom, size_t size) {
// If there is no page mapped/committed, top (bottom + size) is returned
address nbot = bottom;
static address get_stack_mapped_bottom(address bottom,
address ntop = bottom + size;
size_t size,
bool committed_only /* must have backing pages */) {
// address used to test if the page is mapped/committed
address test_addr = bottom + size;
size_t page_sz = os::vm_page_size();
unsigned pages = size / page_sz;

@ -3084,39 +3070,40 @@ static address get_stack_commited_bottom(address bottom, size_t size) {

while (imin < imax) {
imid = (imax + imin) / 2;
nbot = ntop - (imid * page_sz);
test_addr = bottom + (imid * page_sz);

// Use a trick with mincore to check whether the page is mapped or not.
// mincore sets vec to 1 if page resides in memory and to 0 if page
// is swapped output but if page we are asking for is unmapped
// it returns -1,ENOMEM
mincore_return_value = mincore(nbot, page_sz, vec);
mincore_return_value = mincore(test_addr, page_sz, vec);

if (mincore_return_value == -1) {
if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
// Page is not mapped go up
// Page is not mapped/committed go up
// to find first mapped page
// to find first mapped/committed page
if (errno != EAGAIN) {
if ((mincore_return_value == -1 && errno != EAGAIN)
assert(errno == ENOMEM, "Unexpected mincore errno");
|| (committed_only && (vec[0] & 0x01) == 0)) {
imax = imid;
assert(mincore_return_value != -1 || errno == ENOMEM, "Unexpected mincore errno");

imin = imid + 1;
}
} else {
// Page is mapped go down
// mapped/committed, go down
// to find first not mapped page
imax= imid;
imin = imid + 1;
}
}

nbot = nbot + page_sz;
// Adjust stack bottom one page up if last checked page is not mapped/committed
if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
assert(mincore_return_value != -1 || (errno != EAGAIN && errno != ENOMEM),
"Should not get to here");

// Adjust stack bottom one page up if last checked page is not mapped
test_addr = test_addr + page_sz;
if (mincore_return_value == -1) {
nbot = nbot + page_sz;
}

return nbot;
return test_addr;
}
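The renamed get_stack_mapped_bottom() above binary-searches the page range with mincore(2): an unmapped (or, with committed_only, non-resident) probe moves the lower bound up, a usable probe moves the upper bound down, so the loop converges on the first usable page. An illustrative sketch of the per-page probe only (assumes Linux mincore semantics; not part of this change):

    #include <sys/mman.h>

    // Returns true if the page at 'addr' is mapped, and additionally resident
    // in memory when 'committed_only' is set - the mincore() trick noted above.
    static bool page_is_usable(char* addr, size_t page_sz, bool committed_only) {
      unsigned char vec[1];
      if (mincore(addr, page_sz, vec) == -1) {
        return false;                          // ENOMEM: the page is not mapped at all
      }
      return !committed_only || (vec[0] & 0x01) != 0;
    }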


// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
// stack cannot grow beyond the pages where the stack guard was

@ -3153,9 +3140,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {

if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
// Fallback to slow path on all errors, including EAGAIN
stack_extent = (uintptr_t) get_stack_commited_bottom(
stack_extent = (uintptr_t) get_stack_mapped_bottom(os::Linux::initial_thread_stack_bottom(),
os::Linux::initial_thread_stack_bottom(),
(size_t)addr - stack_extent,
(size_t)addr - stack_extent);
false /* committed_only */);
}

if (stack_extent < (uintptr_t)addr) {

@ -3182,6 +3169,11 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
}

size_t os::committed_stack_size(address bottom, size_t size) {
address bot = get_stack_mapped_bottom(bottom, size, true /* committed_only */);
return size_t(bottom + size - bot);
}

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,

@ -331,8 +331,15 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
return aligned_base;
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
return vsnprintf(buf, len, fmt, args);
// All supported POSIX platforms provide C99 semantics.
int result = ::vsnprintf(buf, len, fmt, args);
// If an encoding error occurred (result < 0) then it's not clear
// whether the buffer is NUL terminated, so ensure it is.
if ((result < 0) && (len > 0)) {
buf[len - 1] = '\0';
}
return result;
}
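The POSIX os::vsnprintf wrapper above adds one guarantee over plain vsnprintf: when an encoding error is reported and len > 0, the buffer is still NUL-terminated. An illustrative caller sketch; the helper name format_message is hypothetical and not part of this change:

    // Callers can treat buf as a valid C string unconditionally because
    // os::vsnprintf NUL-terminates it even on encoding errors, as described above.
    static void format_message(char* buf, size_t len, const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      int written = os::vsnprintf(buf, len, fmt, args);  // < 0 on encoding error, >= len if truncated
      va_end(args);
      if (written < 0) {
        // buf still holds a (possibly empty) NUL-terminated string here
      }
    }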

int os::get_fileno(FILE* fp) {

@ -363,6 +363,25 @@ size_t os::current_stack_size() {
return sz;
}

size_t os::committed_stack_size(address bottom, size_t size) {
MEMORY_BASIC_INFORMATION minfo;
address top = bottom + size;
size_t committed_size = 0;

while (committed_size < size) {
// top is exclusive
VirtualQuery(top - 1, &minfo, sizeof(minfo));
if ((minfo.State & MEM_COMMIT) != 0) {
committed_size += minfo.RegionSize;
top -= minfo.RegionSize;
} else {
break;
}
}

return MIN2(committed_size, size);
}
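The Windows os::committed_stack_size() added above walks down from the top of the range with VirtualQuery until it reaches the first region without MEM_COMMIT. An illustrative standalone sketch of the same walk, using plain Win32 types instead of HotSpot's address typedef (not part of the change):

    #include <windows.h>

    static size_t committed_bytes_from_top(char* bottom, size_t size) {
      MEMORY_BASIC_INFORMATION minfo;
      char* top = bottom + size;
      size_t committed = 0;
      while (committed < size) {
        VirtualQuery(top - 1, &minfo, sizeof(minfo));   // query the page just below 'top'
        if ((minfo.State & MEM_COMMIT) == 0) break;     // first uncommitted region ends the walk
        committed += minfo.RegionSize;
        top -= minfo.RegionSize;
      }
      return committed < size ? committed : size;       // never report more than the queried range
    }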
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {

@ -1494,13 +1513,39 @@ void os::get_summary_os_info(char* buf, size_t buflen) {
if (nl != NULL) *nl = '\0';
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
int ret = vsnprintf(buf, len, fmt, args);
#if _MSC_VER >= 1900
// Get the correct buffer size if buf is too small
// Starting with Visual Studio 2015, vsnprint is C99 compliant.
if (ret < 0) {
int result = ::vsnprintf(buf, len, fmt, args);
return _vscprintf(fmt, args);
// If an encoding error occurred (result < 0) then it's not clear
// whether the buffer is NUL terminated, so ensure it is.
if ((result < 0) && (len > 0)) {
buf[len - 1] = '\0';
}
return ret;
return result;
#else
// Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
// _vsnprintf, whose behavior seems to be *mostly* consistent across
// versions. However, when len == 0, avoid _vsnprintf too, and just
// go straight to _vscprintf. The output is going to be truncated in
// that case, except in the unusual case of empty output. More
// importantly, the documentation for various versions of Visual Studio
// are inconsistent about the behavior of _vsnprintf when len == 0,
// including it possibly being an error.
int result = -1;
if (len > 0) {
result = _vsnprintf(buf, len, fmt, args);
// If output (including NUL terminator) is truncated, the buffer
// won't be NUL terminated. Add the trailing NUL specified by C99.
if ((result < 0) || (result >= len)) {
buf[len - 1] = '\0';
}
}
if (result < 0) {
result = _vscprintf(fmt, args);
}
return result;
#endif // _MSC_VER dispatch
}

static inline time_t get_mtime(const char* filename) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/metaspaceShared.hpp"

@ -42,7 +43,7 @@ void JavaThread::cache_global_variables() {
}

if (bs->is_a(BarrierSet::CardTableModRef)) {
_card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base);
_card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base());
} else {
_card_table_base = NULL;
}

@ -25,7 +25,10 @@

#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaAssertions.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/abstractInterpreter.hpp"

@ -539,8 +542,7 @@ void AOTCodeHeap::link_global_lib_symbols() {
_lib_symbols_initialized = true;

CollectedHeap* heap = Universe::heap();
CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ci_card_table_address());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());

@ -380,7 +380,7 @@ class CodeBuffer: public StackObj {
OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
Arena* _overflow_arena;

address _last_membar; // used to merge consecutive memory barriers
address _last_insn; // used to merge consecutive memory barriers, loads or stores.

address _decode_begin; // start address for decode
address decode_begin();

@ -395,7 +395,7 @@ class CodeBuffer: public StackObj {
_decode_begin = NULL;
_overflow_arena = NULL;
_code_strings = CodeStrings();
_last_membar = NULL;
_last_insn = NULL;
}

void initialize(address code_start, csize_t code_size) {

@ -587,9 +587,9 @@ class CodeBuffer: public StackObj {
OopRecorder* oop_recorder() const { return _oop_recorder; }
CodeStrings& strings() { return _code_strings; }

address last_membar() const { return _last_membar; }
address last_insn() const { return _last_insn; }
void set_last_membar(address a) { _last_membar = a; }
void set_last_insn(address a) { _last_insn = a; }
void clear_last_membar() { set_last_membar(NULL); }
void clear_last_insn() { set_last_insn(NULL); }

void free_strings() {
if (!_code_strings.is_null()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -33,6 +33,8 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"

@ -1461,11 +1463,7 @@ void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
// No pre barriers
break;
case BarrierSet::ModRef:
// No pre barriers
break;
default :

@ -1481,13 +1479,9 @@ void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
G1SATBCardTableModRef_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableForRS:
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
CardTableModRef_post_barrier(addr, new_val);
break;
case BarrierSet::ModRef:
// No post barriers
break;
default :
ShouldNotReachHere();
}

@ -1616,9 +1610,7 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
// ptr cannot be an object because we use this barrier for array card marks

@ -1640,9 +1632,9 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
__ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
__ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
} else {
__ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
__ unsigned_shift_right(addr, CardTable::card_shift, tmp);
}

LIR_Address* card_addr;

@ -1652,7 +1644,7 @@ void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc*
card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
}

LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
if (UseConcMarkSweepGC) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -53,6 +53,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/init.hpp"
#include "runtime/reflection.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "trace/tracing.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -34,6 +34,7 @@
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/jniHandles.inline.hpp"

// ciInstanceKlass
//

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -27,6 +27,7 @@
#include "ci/ciUtilities.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/jniHandles.inline.hpp"

// ciObject
//

@ -97,6 +98,14 @@ ciObject::ciObject() {
_klass = NULL;
}

// ------------------------------------------------------------------
// ciObject::get_oop
//
// Get the oop of this ciObject.
oop ciObject::get_oop() const {
return JNIHandles::resolve_non_null(_handle);
}

// ------------------------------------------------------------------
// ciObject::klass
//

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -67,10 +67,7 @@ protected:

jobject handle() const { return _handle; }
// Get the VM oop that this object holds.
oop get_oop() const {
oop get_oop() const;
assert(_handle != NULL, "null oop");
return JNIHandles::resolve_non_null(_handle);
}

void init_flags_from(oop x);

@ -24,6 +24,9 @@

#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/universe.hpp"

// ciUtilities
//

@ -43,3 +46,13 @@ const char basictype_to_char(BasicType t) {
char c = type2char(t);
return c ? c : 'X';
}

// ------------------------------------------------------------------
// card_table_base
jbyte *ci_card_table_address() {
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
return ct->byte_map_base();
}

@ -27,6 +27,7 @@

#include "ci/ciEnv.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/globalDefinitions.hpp"

// The following routines and definitions are used internally in the
// compiler interface.

@ -114,4 +115,9 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
const char basictype_to_char(BasicType t);

jbyte *ci_card_table_address();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address());
}

#endif // SHARE_VM_CI_CIUTILITIES_HPP
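The ci_card_table_address_as<T>() template declared above is what the platform back ends in this change use to embed the card table base directly in generated code. An illustrative usage sketch mirroring the stubGenerator_x86_64 pattern; the function name and register choice are examples only, not part of the change:

    #include "ci/ciUtilities.hpp"

    // Load the card table base as a 64-bit immediate, as the x86_64 stubs above do.
    static void load_card_table_base(MacroAssembler* masm, Register dst) {
      int64_t base = ci_card_table_address_as<int64_t>();
      masm->mov64(dst, base);
    }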

@ -53,6 +53,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -44,6 +44,7 @@
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/reflection.hpp"
#include "utilities/stringUtils.hpp"
#include "utilities/utf8.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -45,6 +45,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"

// Constructors

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -37,6 +37,7 @@
#include "oops/objArrayKlass.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

@ -44,6 +44,7 @@
#include "oops/oop.inline.hpp"
|
#include "oops/oop.inline.hpp"
|
||||||
#include "prims/jvmtiImpl.hpp"
|
#include "prims/jvmtiImpl.hpp"
|
||||||
#include "runtime/atomic.hpp"
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "runtime/jniHandles.inline.hpp"
|
||||||
#include "runtime/orderAccess.inline.hpp"
|
#include "runtime/orderAccess.inline.hpp"
|
||||||
#include "runtime/os.hpp"
|
#include "runtime/os.hpp"
|
||||||
#include "runtime/sharedRuntime.hpp"
|
#include "runtime/sharedRuntime.hpp"
|
||||||
|
|
|
@ -29,6 +29,7 @@
|
||||||
#include "code/oopRecorder.hpp"
|
#include "code/oopRecorder.hpp"
|
||||||
#include "memory/allocation.inline.hpp"
|
#include "memory/allocation.inline.hpp"
|
||||||
#include "oops/oop.inline.hpp"
|
#include "oops/oop.inline.hpp"
|
||||||
|
#include "runtime/jniHandles.inline.hpp"
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
template <class T> int ValueRecorder<T>::_find_index_calls = 0;
|
template <class T> int ValueRecorder<T>::_find_index_calls = 0;
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -26,6 +26,7 @@
|
||||||
#include "code/codeCache.hpp"
|
#include "code/codeCache.hpp"
|
||||||
#include "code/relocInfo.hpp"
|
#include "code/relocInfo.hpp"
|
||||||
#include "code/relocInfo_ext.hpp"
|
#include "code/relocInfo_ext.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
#include "gc/shared/cardTableModRefBS.hpp"
|
#include "gc/shared/cardTableModRefBS.hpp"
|
||||||
#include "gc/shared/collectedHeap.hpp"
|
#include "gc/shared/collectedHeap.hpp"
|
||||||
#include "memory/universe.hpp"
|
#include "memory/universe.hpp"
|
||||||
|
@ -59,8 +60,9 @@ address symbolic_Relocation::symbolic_value(symbolic_Relocation::symbolic_refere
|
||||||
}
|
}
|
||||||
case symbolic_Relocation::card_table_reference: {
|
case symbolic_Relocation::card_table_reference: {
|
||||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||||
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
|
CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
|
||||||
return (address)ct->byte_map_base;
|
CardTable* ct = ctbs->card_table();
|
||||||
|
return (address)ct->byte_map_base();
|
||||||
}
|
}
|
||||||
case symbolic_Relocation::mark_bits_reference: {
|
case symbolic_Relocation::mark_bits_reference: {
|
||||||
return (address)Universe::verify_mark_bits();
|
return (address)Universe::verify_mark_bits();
|
||||||
|
|
|
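The hunk above stops reading byte_map_base as a raw field of CardTableModRefBS and instead reaches it through the new CardTable object (ctbs->card_table()->byte_map_base()). The value itself is unchanged: it is the card-table byte array biased by the heap start, so that shifting an address right by the card shift indexes the correct card byte. A stand-alone sketch of that biasing arithmetic; the 512-byte card size, heap bounds and names here are illustrative assumptions, not HotSpot code:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const unsigned  card_shift = 9;            // assumed: one card covers 512 bytes
  const uintptr_t heap_start = 0x100000;     // hypothetical heap bounds
  const uintptr_t heap_end   = 0x200000;

  // One byte per card for the covered range.
  std::vector<int8_t> byte_map((heap_end - heap_start) >> card_shift, -1);

  // Biased base: byte_map_base + (addr >> card_shift) lands inside byte_map for
  // any addr in [heap_start, heap_end) without subtracting heap_start on every
  // access; this is the address that relocations patch into generated code.
  int8_t* byte_map_base = byte_map.data() - (heap_start >> card_shift);

  uintptr_t addr = heap_start + 12345;
  int8_t* card = byte_map_base + (addr >> card_shift);
  assert(card >= byte_map.data() && card < byte_map.data() + byte_map.size());
  *card = 0;                                 // e.g. mark the card dirty
  return 0;
}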
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -23,9 +23,11 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "precompiled.hpp"
|
#include "precompiled.hpp"
|
||||||
|
#include "ci/ciUtilities.hpp"
|
||||||
#include "classfile/javaClasses.hpp"
|
#include "classfile/javaClasses.hpp"
|
||||||
#include "code/codeCache.hpp"
|
#include "code/codeCache.hpp"
|
||||||
#include "compiler/disassembler.hpp"
|
#include "compiler/disassembler.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
#include "gc/shared/cardTableModRefBS.hpp"
|
#include "gc/shared/cardTableModRefBS.hpp"
|
||||||
#include "gc/shared/collectedHeap.hpp"
|
#include "gc/shared/collectedHeap.hpp"
|
||||||
#include "memory/resourceArea.hpp"
|
#include "memory/resourceArea.hpp"
|
||||||
|
@ -318,7 +320,7 @@ void decode_env::print_address(address adr) {
|
||||||
|
|
||||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||||
if (bs->is_a(BarrierSet::CardTableModRef) &&
|
if (bs->is_a(BarrierSet::CardTableModRef) &&
|
||||||
adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) {
|
adr == ci_card_table_address_as<address>()) {
|
||||||
st->print("word_map_base");
|
st->print("word_map_base");
|
||||||
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
|
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -88,9 +88,9 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, M
|
||||||
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
|
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
|
||||||
"CompactibleFreeListSpace._dict_par_lock", true,
|
"CompactibleFreeListSpace._dict_par_lock", true,
|
||||||
Monitor::_safepoint_check_never),
|
Monitor::_safepoint_check_never),
|
||||||
_rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
|
_rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
|
||||||
CMSRescanMultiple),
|
CMSRescanMultiple),
|
||||||
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
|
_marking_task_size(CardTable::card_size_in_words * BitsPerWord *
|
||||||
CMSConcMarkMultiple),
|
CMSConcMarkMultiple),
|
||||||
_collector(NULL),
|
_collector(NULL),
|
||||||
_preconsumptionDirtyCardClosure(NULL)
|
_preconsumptionDirtyCardClosure(NULL)
|
||||||
|
@ -609,7 +609,7 @@ public:
|
||||||
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
|
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
|
||||||
CMSCollector* collector,
|
CMSCollector* collector,
|
||||||
ExtendedOopClosure* cl,
|
ExtendedOopClosure* cl,
|
||||||
CardTableModRefBS::PrecisionStyle precision,
|
CardTable::PrecisionStyle precision,
|
||||||
HeapWord* boundary,
|
HeapWord* boundary,
|
||||||
bool parallel) :
|
bool parallel) :
|
||||||
FilteringDCTOC(sp, cl, precision, boundary),
|
FilteringDCTOC(sp, cl, precision, boundary),
|
||||||
|
@ -693,7 +693,7 @@ FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
|
||||||
|
|
||||||
DirtyCardToOopClosure*
|
DirtyCardToOopClosure*
|
||||||
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
|
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
|
||||||
CardTableModRefBS::PrecisionStyle precision,
|
CardTable::PrecisionStyle precision,
|
||||||
HeapWord* boundary,
|
HeapWord* boundary,
|
||||||
bool parallel) {
|
bool parallel) {
|
||||||
return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
|
return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
|
||||||
|
@ -2828,7 +2828,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
|
||||||
}
|
}
|
||||||
|
|
||||||
const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
|
const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
|
||||||
const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
|
const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
|
||||||
return ergo_max;
|
return ergo_max;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2865,15 +2865,15 @@ initialize_sequential_subtasks_for_marking(int n_threads,
|
||||||
// The "size" of each task is fixed according to rescan_task_size.
|
// The "size" of each task is fixed according to rescan_task_size.
|
||||||
assert(n_threads > 0, "Unexpected n_threads argument");
|
assert(n_threads > 0, "Unexpected n_threads argument");
|
||||||
const size_t task_size = marking_task_size();
|
const size_t task_size = marking_task_size();
|
||||||
assert(task_size > CardTableModRefBS::card_size_in_words &&
|
assert(task_size > CardTable::card_size_in_words &&
|
||||||
(task_size % CardTableModRefBS::card_size_in_words == 0),
|
(task_size % CardTable::card_size_in_words == 0),
|
||||||
"Otherwise arithmetic below would be incorrect");
|
"Otherwise arithmetic below would be incorrect");
|
||||||
MemRegion span = _old_gen->reserved();
|
MemRegion span = _old_gen->reserved();
|
||||||
if (low != NULL) {
|
if (low != NULL) {
|
||||||
if (span.contains(low)) {
|
if (span.contains(low)) {
|
||||||
// Align low down to a card boundary so that
|
// Align low down to a card boundary so that
|
||||||
// we can use block_offset_careful() on span boundaries.
|
// we can use block_offset_careful() on span boundaries.
|
||||||
HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
|
HeapWord* aligned_low = align_down(low, CardTable::card_size);
|
||||||
// Clip span prefix at aligned_low
|
// Clip span prefix at aligned_low
|
||||||
span = span.intersection(MemRegion(aligned_low, span.end()));
|
span = span.intersection(MemRegion(aligned_low, span.end()));
|
||||||
} else if (low > span.end()) {
|
} else if (low > span.end()) {
|
||||||
|
@ -2881,7 +2881,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
|
||||||
} // else use entire span
|
} // else use entire span
|
||||||
}
|
}
|
||||||
assert(span.is_empty() ||
|
assert(span.is_empty() ||
|
||||||
((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
|
((uintptr_t)span.start() % CardTable::card_size == 0),
|
||||||
"span should start at a card boundary");
|
"span should start at a card boundary");
|
||||||
size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
|
size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
|
||||||
assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
|
assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
|
||||||
|
|
|
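The marking/rescan task sizing in the hunks above only moves the constants from CardTableModRefBS to CardTable; the arithmetic is untouched: task sizes stay whole multiples of card_size_in_words, span boundaries are aligned down to a card, and the task count is a rounding-up division. A small stand-alone model of that partitioning, with an assumed 512-byte card and 8-byte HeapWord:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t heap_word_size     = 8;                        // assumed 64-bit HeapWord
  const size_t card_size          = 512;                      // assumed card size in bytes
  const size_t card_size_in_words = card_size / heap_word_size;
  const size_t bits_per_word      = 64;
  const size_t rescan_multiple    = 32;                       // stands in for CMSRescanMultiple

  // Task size, in words, is a whole number of cards (and of mod-union words).
  size_t task_size = card_size_in_words * bits_per_word * rescan_multiple;
  assert(task_size % card_size_in_words == 0);

  // Align the low boundary down to a card, then split the span into tasks,
  // rounding the last (possibly partial) chunk up to a full task.
  uintptr_t low         = 0x10037;                            // hypothetical word index
  uintptr_t aligned_low = low & ~(uintptr_t)(card_size_in_words - 1);
  size_t span_words = 10 * task_size + 17;
  size_t n_tasks = (span_words + task_size - 1) / task_size;  // == 11 here
  assert(aligned_low % card_size_in_words == 0);
  assert(n_tasks == 11);
  return 0;
}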
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -28,6 +28,7 @@
|
||||||
#include "gc/cms/adaptiveFreeList.hpp"
|
#include "gc/cms/adaptiveFreeList.hpp"
|
||||||
#include "gc/cms/promotionInfo.hpp"
|
#include "gc/cms/promotionInfo.hpp"
|
||||||
#include "gc/shared/blockOffsetTable.hpp"
|
#include "gc/shared/blockOffsetTable.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
#include "gc/shared/space.hpp"
|
#include "gc/shared/space.hpp"
|
||||||
#include "logging/log.hpp"
|
#include "logging/log.hpp"
|
||||||
#include "memory/binaryTreeDictionary.hpp"
|
#include "memory/binaryTreeDictionary.hpp"
|
||||||
|
@ -432,7 +433,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
|
||||||
|
|
||||||
// Override: provides a DCTO_CL specific to this kind of space.
|
// Override: provides a DCTO_CL specific to this kind of space.
|
||||||
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
|
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
|
||||||
CardTableModRefBS::PrecisionStyle precision,
|
CardTable::PrecisionStyle precision,
|
||||||
HeapWord* boundary,
|
HeapWord* boundary,
|
||||||
bool parallel);
|
bool parallel);
|
||||||
|
|
||||||
|
|
|
@ -448,7 +448,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||||
_start_sampling(false),
|
_start_sampling(false),
|
||||||
_between_prologue_and_epilogue(false),
|
_between_prologue_and_epilogue(false),
|
||||||
_markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
|
_markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
|
||||||
_modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
|
_modUnionTable((CardTable::card_shift - LogHeapWordSize),
|
||||||
-1 /* lock-free */, "No_lock" /* dummy */),
|
-1 /* lock-free */, "No_lock" /* dummy */),
|
||||||
_modUnionClosurePar(&_modUnionTable),
|
_modUnionClosurePar(&_modUnionTable),
|
||||||
// Adjust my span to cover old (cms) gen
|
// Adjust my span to cover old (cms) gen
|
||||||
|
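The constructor change above sizes the mod-union table with a shifter of card_shift - LogHeapWordSize, i.e. one bit per card, indexed in heap words. A stand-alone check of that arithmetic, assuming 9-bit cards and 8-byte words for the example:

#include <cassert>
#include <cstddef>

int main() {
  const unsigned card_shift         = 9;   // assumed: 512-byte cards
  const unsigned log_heap_word_size = 3;   // assumed: 8-byte HeapWords
  const unsigned shifter = card_shift - log_heap_word_size;   // words per bit, as a shift

  // One mod-union bit covers 2^shifter heap words, which is exactly one card.
  const size_t words_per_bit = size_t(1) << shifter;
  assert(words_per_bit * (size_t(1) << log_heap_word_size) == (size_t(1) << card_shift));

  // Word offset within the old gen -> bit index in the mod-union table.
  size_t word_offset = 1000;
  size_t bit_index = word_offset >> shifter;   // 1000 / 64 == 15
  assert(bit_index == 15);
  return 0;
}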
@ -900,7 +900,7 @@ void CMSCollector::promoted(bool par, HeapWord* start,
|
||||||
// card size.
|
// card size.
|
||||||
MemRegion mr(start,
|
MemRegion mr(start,
|
||||||
align_up(start + obj_size,
|
align_up(start + obj_size,
|
||||||
CardTableModRefBS::card_size /* bytes */));
|
CardTable::card_size /* bytes */));
|
||||||
if (par) {
|
if (par) {
|
||||||
_modUnionTable.par_mark_range(mr);
|
_modUnionTable.par_mark_range(mr);
|
||||||
} else {
|
} else {
|
||||||
|
@ -3223,7 +3223,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
|
||||||
if (sp->used_region().contains(_restart_addr)) {
|
if (sp->used_region().contains(_restart_addr)) {
|
||||||
// Align down to a card boundary for the start of 0th task
|
// Align down to a card boundary for the start of 0th task
|
||||||
// for this space.
|
// for this space.
|
||||||
aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
|
aligned_start = align_down(_restart_addr, CardTable::card_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t chunk_size = sp->marking_task_size();
|
size_t chunk_size = sp->marking_task_size();
|
||||||
|
@ -4026,17 +4026,16 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
|
||||||
startTimer();
|
startTimer();
|
||||||
sample_eden();
|
sample_eden();
|
||||||
// Get and clear dirty region from card table
|
// Get and clear dirty region from card table
|
||||||
dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
                              MemRegion(nextAddr, endAddr),
                              true,
                              CardTableModRefBS::precleaned_card_val());
|
dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
                                                true,
                                                CardTable::precleaned_card_val());
|
|
||||||
|
|
||||||
assert(dirtyRegion.start() >= nextAddr,
|
assert(dirtyRegion.start() >= nextAddr,
|
||||||
"returned region inconsistent?");
|
"returned region inconsistent?");
|
||||||
}
|
}
|
||||||
lastAddr = dirtyRegion.end();
|
lastAddr = dirtyRegion.end();
|
||||||
numDirtyCards =
|
numDirtyCards =
|
||||||
dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
|
dirtyRegion.word_size()/CardTable::card_size_in_words;
|
||||||
|
|
||||||
if (!dirtyRegion.is_empty()) {
|
if (!dirtyRegion.is_empty()) {
|
||||||
stopTimer();
|
stopTimer();
|
||||||
|
@ -4050,7 +4049,7 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
|
||||||
if (stop_point != NULL) {
|
if (stop_point != NULL) {
|
||||||
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
|
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
|
||||||
"Should only be AbortablePreclean.");
|
"Should only be AbortablePreclean.");
|
||||||
_ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
|
_ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
|
||||||
if (should_abort_preclean()) {
|
if (should_abort_preclean()) {
|
||||||
break; // out of preclean loop
|
break; // out of preclean loop
|
||||||
} else {
|
} else {
|
||||||
|
@ -4577,7 +4576,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
|
||||||
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
|
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
|
||||||
assert(pst->valid(), "Uninitialized use?");
|
assert(pst->valid(), "Uninitialized use?");
|
||||||
uint nth_task = 0;
|
uint nth_task = 0;
|
||||||
const int alignment = CardTableModRefBS::card_size * BitsPerWord;
|
const int alignment = CardTable::card_size * BitsPerWord;
|
||||||
MemRegion span = sp->used_region();
|
MemRegion span = sp->used_region();
|
||||||
HeapWord* start_addr = span.start();
|
HeapWord* start_addr = span.start();
|
||||||
HeapWord* end_addr = align_up(span.end(), alignment);
|
HeapWord* end_addr = align_up(span.end(), alignment);
|
||||||
|
@ -4603,7 +4602,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
|
||||||
// precleaned, and setting the corresponding bits in the mod union
|
// precleaned, and setting the corresponding bits in the mod union
|
||||||
// table. Since we have been careful to partition at Card and MUT-word
|
// table. Since we have been careful to partition at Card and MUT-word
|
||||||
// boundaries no synchronization is needed between parallel threads.
|
// boundaries no synchronization is needed between parallel threads.
|
||||||
_collector->_ct->ct_bs()->dirty_card_iterate(this_span,
|
_collector->_ct->dirty_card_iterate(this_span,
|
||||||
&modUnionClosure);
|
&modUnionClosure);
|
||||||
|
|
||||||
// Having transferred these marks into the modUnionTable,
|
// Having transferred these marks into the modUnionTable,
|
||||||
|
@ -4914,16 +4913,14 @@ void CMSCollector::do_remark_non_parallel() {
|
||||||
// mod union table.
|
// mod union table.
|
||||||
{
|
{
|
||||||
ModUnionClosure modUnionClosure(&_modUnionTable);
|
ModUnionClosure modUnionClosure(&_modUnionTable);
|
||||||
_ct->ct_bs()->dirty_card_iterate(
    _cmsGen->used_region(),
    &modUnionClosure);
|
_ct->dirty_card_iterate(_cmsGen->used_region(),
                        &modUnionClosure);
|
|
||||||
}
|
}
|
||||||
// Having transferred these marks into the modUnionTable, we just need
|
// Having transferred these marks into the modUnionTable, we just need
|
||||||
// to rescan the marked objects on the dirty cards in the modUnionTable.
|
// to rescan the marked objects on the dirty cards in the modUnionTable.
|
||||||
// The initial marking may have been done during an asynchronous
|
// The initial marking may have been done during an asynchronous
|
||||||
// collection so there may be dirty bits in the mod-union table.
|
// collection so there may be dirty bits in the mod-union table.
|
||||||
const int alignment =
    CardTableModRefBS::card_size * BitsPerWord;
|
const int alignment = CardTable::card_size * BitsPerWord;
|
|
||||||
{
|
{
|
||||||
// ... First handle dirty cards in CMS gen
|
// ... First handle dirty cards in CMS gen
|
||||||
markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
|
markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
|
||||||
|
@ -5633,9 +5630,9 @@ HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
|
||||||
}
|
}
|
||||||
assert(sz > 0, "size must be nonzero");
|
assert(sz > 0, "size must be nonzero");
|
||||||
HeapWord* next_block = addr + sz;
|
HeapWord* next_block = addr + sz;
|
||||||
HeapWord* next_card = align_up(next_block, CardTableModRefBS::card_size);
|
HeapWord* next_card = align_up(next_block, CardTable::card_size);
|
||||||
assert(align_down((uintptr_t)addr, CardTableModRefBS::card_size) <
|
assert(align_down((uintptr_t)addr, CardTable::card_size) <
|
||||||
align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
|
align_down((uintptr_t)next_card, CardTable::card_size),
|
||||||
"must be different cards");
|
"must be different cards");
|
||||||
return next_card;
|
return next_card;
|
||||||
}
|
}
|
||||||
|
@ -6294,7 +6291,7 @@ void MarkFromRootsClosure::reset(HeapWord* addr) {
|
||||||
assert(_markStack->isEmpty(), "would cause duplicates on stack");
|
assert(_markStack->isEmpty(), "would cause duplicates on stack");
|
||||||
assert(_span.contains(addr), "Out of bounds _finger?");
|
assert(_span.contains(addr), "Out of bounds _finger?");
|
||||||
_finger = addr;
|
_finger = addr;
|
||||||
_threshold = align_up(_finger, CardTableModRefBS::card_size);
|
_threshold = align_up(_finger, CardTable::card_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Should revisit to see if this should be restructured for
|
// Should revisit to see if this should be restructured for
|
||||||
|
@ -6321,7 +6318,7 @@ bool MarkFromRootsClosure::do_bit(size_t offset) {
|
||||||
// during the preclean or remark phase. (CMSCleanOnEnter)
|
// during the preclean or remark phase. (CMSCleanOnEnter)
|
||||||
if (CMSCleanOnEnter) {
|
if (CMSCleanOnEnter) {
|
||||||
size_t sz = _collector->block_size_using_printezis_bits(addr);
|
size_t sz = _collector->block_size_using_printezis_bits(addr);
|
||||||
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
|
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
|
||||||
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
||||||
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
||||||
// Bump _threshold to end_card_addr; note that
|
// Bump _threshold to end_card_addr; note that
|
||||||
|
@ -6408,9 +6405,9 @@ void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
|
||||||
// _threshold is always kept card-aligned but _finger isn't
|
// _threshold is always kept card-aligned but _finger isn't
|
||||||
// always card-aligned.
|
// always card-aligned.
|
||||||
HeapWord* old_threshold = _threshold;
|
HeapWord* old_threshold = _threshold;
|
||||||
assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
|
assert(is_aligned(old_threshold, CardTable::card_size),
|
||||||
"_threshold should always be card-aligned");
|
"_threshold should always be card-aligned");
|
||||||
_threshold = align_up(_finger, CardTableModRefBS::card_size);
|
_threshold = align_up(_finger, CardTable::card_size);
|
||||||
MemRegion mr(old_threshold, _threshold);
|
MemRegion mr(old_threshold, _threshold);
|
||||||
assert(!mr.is_empty(), "Control point invariant");
|
assert(!mr.is_empty(), "Control point invariant");
|
||||||
assert(_span.contains(mr), "Should clear within span");
|
assert(_span.contains(mr), "Should clear within span");
|
||||||
|
@ -6520,9 +6517,9 @@ void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
|
||||||
// _threshold is always kept card-aligned but _finger isn't
|
// _threshold is always kept card-aligned but _finger isn't
|
||||||
// always card-aligned.
|
// always card-aligned.
|
||||||
HeapWord* old_threshold = _threshold;
|
HeapWord* old_threshold = _threshold;
|
||||||
assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
|
assert(is_aligned(old_threshold, CardTable::card_size),
|
||||||
"_threshold should always be card-aligned");
|
"_threshold should always be card-aligned");
|
||||||
_threshold = align_up(_finger, CardTableModRefBS::card_size);
|
_threshold = align_up(_finger, CardTable::card_size);
|
||||||
MemRegion mr(old_threshold, _threshold);
|
MemRegion mr(old_threshold, _threshold);
|
||||||
assert(!mr.is_empty(), "Control point invariant");
|
assert(!mr.is_empty(), "Control point invariant");
|
||||||
assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
|
assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
|
||||||
|
@ -6890,7 +6887,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
|
||||||
// are required.
|
// are required.
|
||||||
if (obj->is_objArray()) {
|
if (obj->is_objArray()) {
|
||||||
size_t sz = obj->size();
|
size_t sz = obj->size();
|
||||||
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
|
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
|
||||||
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
||||||
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
||||||
_mod_union_table->mark_range(redirty_range);
|
_mod_union_table->mark_range(redirty_range);
|
||||||
|
@ -7003,15 +7000,15 @@ bool CMSPrecleanRefsYieldClosure::should_return() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
|
void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
|
||||||
assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
|
assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
|
||||||
"mr should be aligned to start at a card boundary");
|
"mr should be aligned to start at a card boundary");
|
||||||
// We'd like to assert:
|
// We'd like to assert:
|
||||||
// assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
|
// assert(mr.word_size()%CardTable::card_size_in_words == 0,
|
||||||
// "mr should be a range of cards");
|
// "mr should be a range of cards");
|
||||||
// However, that would be too strong in one case -- the last
|
// However, that would be too strong in one case -- the last
|
||||||
// partition ends at _unallocated_block which, in general, can be
|
// partition ends at _unallocated_block which, in general, can be
|
||||||
// an arbitrary boundary, not necessarily card aligned.
|
// an arbitrary boundary, not necessarily card aligned.
|
||||||
_num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
|
_num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
|
||||||
_space->object_iterate_mem(mr, &_scan_cl);
|
_space->object_iterate_mem(mr, &_scan_cl);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -7620,7 +7617,7 @@ void CMSKeepAliveClosure::do_oop(oop obj) {
|
||||||
// table.
|
// table.
|
||||||
if (obj->is_objArray()) {
|
if (obj->is_objArray()) {
|
||||||
size_t sz = obj->size();
|
size_t sz = obj->size();
|
||||||
HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
|
HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
|
||||||
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
MemRegion redirty_range = MemRegion(addr, end_card_addr);
|
||||||
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
assert(!redirty_range.is_empty(), "Arithmetical tautology");
|
||||||
_collector->_modUnionTable.mark_range(redirty_range);
|
_collector->_modUnionTable.mark_range(redirty_range);
|
||||||
|
|
|
@ -77,7 +77,7 @@ class SerialOldTracer;
|
||||||
// methods are used). This is essentially a wrapper around the BitMap class,
|
// methods are used). This is essentially a wrapper around the BitMap class,
|
||||||
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
|
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
|
||||||
// we have _shifter == 0. and for the mod union table we have
|
// we have _shifter == 0. and for the mod union table we have
|
||||||
// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
|
// shifter == CardTable::card_shift - LogHeapWordSize.)
|
||||||
// XXX 64-bit issues in BitMap?
|
// XXX 64-bit issues in BitMap?
|
||||||
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
|
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
|
||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -448,7 +448,7 @@ inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
|
||||||
// This is superfluous except at the end of the space;
|
// This is superfluous except at the end of the space;
|
||||||
// we should do better than this XXX
|
// we should do better than this XXX
|
||||||
MemRegion mr2(mr.start(), align_up(mr.end(),
|
MemRegion mr2(mr.start(), align_up(mr.end(),
|
||||||
CardTableModRefBS::card_size /* bytes */));
|
CardTable::card_size /* bytes */));
|
||||||
_t->mark_range(mr2);
|
_t->mark_range(mr2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -457,7 +457,7 @@ inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
|
||||||
// This is superfluous except at the end of the space;
|
// This is superfluous except at the end of the space;
|
||||||
// we should do better than this XXX
|
// we should do better than this XXX
|
||||||
MemRegion mr2(mr.start(), align_up(mr.end(),
|
MemRegion mr2(mr.start(), align_up(mr.end(),
|
||||||
CardTableModRefBS::card_size /* bytes */));
|
CardTable::card_size /* bytes */));
|
||||||
_t->par_mark_range(mr2);
|
_t->par_mark_range(mr2);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -36,7 +36,7 @@
|
||||||
#include "runtime/orderAccess.inline.hpp"
|
#include "runtime/orderAccess.inline.hpp"
|
||||||
#include "runtime/vmThread.hpp"
|
#include "runtime/vmThread.hpp"
|
||||||
|
|
||||||
void CardTableModRefBSForCTRS::
|
void CardTableRS::
|
||||||
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
|
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
|
||||||
OopsInGenClosure* cl,
|
OopsInGenClosure* cl,
|
||||||
CardTableRS* ct,
|
CardTableRS* ct,
|
||||||
|
@ -82,7 +82,7 @@ non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
CardTableModRefBSForCTRS::
|
CardTableRS::
|
||||||
process_stride(Space* sp,
|
process_stride(Space* sp,
|
||||||
MemRegion used,
|
MemRegion used,
|
||||||
jint stride, int n_strides,
|
jint stride, int n_strides,
|
||||||
|
@ -162,7 +162,7 @@ process_stride(Space* sp,
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
CardTableModRefBSForCTRS::
|
CardTableRS::
|
||||||
process_chunk_boundaries(Space* sp,
|
process_chunk_boundaries(Space* sp,
|
||||||
DirtyCardToOopClosure* dcto_cl,
|
DirtyCardToOopClosure* dcto_cl,
|
||||||
MemRegion chunk_mr,
|
MemRegion chunk_mr,
|
||||||
|
@ -371,7 +371,7 @@ process_chunk_boundaries(Space* sp,
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
CardTableModRefBSForCTRS::
|
CardTableRS::
|
||||||
get_LNC_array_for_space(Space* sp,
|
get_LNC_array_for_space(Space* sp,
|
||||||
jbyte**& lowest_non_clean,
|
jbyte**& lowest_non_clean,
|
||||||
uintptr_t& lowest_non_clean_base_chunk_index,
|
uintptr_t& lowest_non_clean_base_chunk_index,
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -40,12 +40,12 @@ void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_re
|
||||||
size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
|
size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
|
||||||
// We keep card counts for every card, so the size of the card counts table must
|
// We keep card counts for every card, so the size of the card counts table must
|
||||||
// be the same as the card table.
|
// be the same as the card table.
|
||||||
return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words);
|
return G1CardTable::compute_size(mem_region_size_in_words);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t G1CardCounts::heap_map_factor() {
|
size_t G1CardCounts::heap_map_factor() {
|
||||||
// See G1CardCounts::compute_size() why we reuse the card table value.
|
// See G1CardCounts::compute_size() why we reuse the card table value.
|
||||||
return G1SATBCardTableLoggingModRefBS::heap_map_factor();
|
return G1CardTable::heap_map_factor();
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
|
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
|
||||||
|
@ -72,8 +72,8 @@ void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
|
||||||
// threshold limit is no more than this.
|
// threshold limit is no more than this.
|
||||||
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
|
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
|
||||||
|
|
||||||
_ct_bs = _g1h->g1_barrier_set();
|
_ct = _g1h->card_table();
|
||||||
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
|
_ct_bot = _ct->byte_for_const(_g1h->reserved_region().start());
|
||||||
|
|
||||||
_card_counts = (jubyte*) mapper->reserved().start();
|
_card_counts = (jubyte*) mapper->reserved().start();
|
||||||
_reserved_max_card_num = mapper->reserved().byte_size();
|
_reserved_max_card_num = mapper->reserved().byte_size();
|
||||||
|
@ -116,17 +116,17 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
|
||||||
|
|
||||||
void G1CardCounts::clear_range(MemRegion mr) {
|
void G1CardCounts::clear_range(MemRegion mr) {
|
||||||
if (has_count_table()) {
|
if (has_count_table()) {
|
||||||
const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
|
const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
|
||||||
// We use the last address in the range as the range could represent the
|
// We use the last address in the range as the range could represent the
|
||||||
// last region in the heap. In which case trying to find the card will be an
|
// last region in the heap. In which case trying to find the card will be an
|
||||||
// OOB access to the card table.
|
// OOB access to the card table.
|
||||||
const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
|
const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
|
||||||
|
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
|
HeapWord* start_addr = _ct->addr_for(from_card_ptr);
|
||||||
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
|
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
|
||||||
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
|
HeapWord* last_addr = _ct->addr_for(last_card_ptr);
|
||||||
assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
|
assert((last_addr + G1CardTable::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
|
||||||
#endif // ASSERT
|
#endif // ASSERT
|
||||||
|
|
||||||
// Clear the counts for the (exclusive) card range.
|
// Clear the counts for the (exclusive) card range.
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -25,6 +25,7 @@
|
||||||
#ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
|
#ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
|
||||||
#define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
|
#define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
|
||||||
|
|
||||||
|
#include "gc/g1/g1CardTable.hpp"
|
||||||
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||||
#include "memory/allocation.hpp"
|
#include "memory/allocation.hpp"
|
||||||
#include "memory/virtualspace.hpp"
|
#include "memory/virtualspace.hpp"
|
||||||
|
@ -56,6 +57,7 @@ class G1CardCounts: public CHeapObj<mtGC> {
|
||||||
G1CardCountsMappingChangedListener _listener;
|
G1CardCountsMappingChangedListener _listener;
|
||||||
|
|
||||||
G1CollectedHeap* _g1h;
|
G1CollectedHeap* _g1h;
|
||||||
|
G1CardTable* _ct;
|
||||||
|
|
||||||
// The table of counts
|
// The table of counts
|
||||||
jubyte* _card_counts;
|
jubyte* _card_counts;
|
||||||
|
@ -66,9 +68,6 @@ class G1CardCounts: public CHeapObj<mtGC> {
|
||||||
// CardTable bottom.
|
// CardTable bottom.
|
||||||
const jbyte* _ct_bot;
|
const jbyte* _ct_bot;
|
||||||
|
|
||||||
// Barrier set
|
|
||||||
CardTableModRefBS* _ct_bs;
|
|
||||||
|
|
||||||
// Returns true if the card counts table has been reserved.
|
// Returns true if the card counts table has been reserved.
|
||||||
bool has_reserved_count_table() { return _card_counts != NULL; }
|
bool has_reserved_count_table() { return _card_counts != NULL; }
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
@ -68,10 +68,10 @@ void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
|
||||||
assert(max_capacity % num_max_regions == 0,
|
assert(max_capacity % num_max_regions == 0,
|
||||||
"Given capacity must be evenly divisible by region size.");
|
"Given capacity must be evenly divisible by region size.");
|
||||||
size_t region_size = max_capacity / num_max_regions;
|
size_t region_size = max_capacity / num_max_regions;
|
||||||
assert(region_size % (G1SATBCardTableModRefBS::card_size * BitsPerWord) == 0,
|
assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
|
||||||
"Region size must be evenly divisible by area covered by a single word.");
|
"Region size must be evenly divisible by area covered by a single word.");
|
||||||
_max_capacity = max_capacity;
|
_max_capacity = max_capacity;
|
||||||
_cards_per_region = region_size / G1SATBCardTableModRefBS::card_size;
|
_cards_per_region = region_size / G1CardTable::card_size;
|
||||||
|
|
||||||
_live_regions_size_in_bits = live_region_bitmap_size_in_bits();
|
_live_regions_size_in_bits = live_region_bitmap_size_in_bits();
|
||||||
_live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
|
_live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
|
||||||
|
@ -85,11 +85,11 @@ void G1CardLiveData::pretouch() {
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
|
size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
|
||||||
return _max_capacity / (_cards_per_region << G1SATBCardTableModRefBS::card_shift);
|
return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
|
size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
|
||||||
return _max_capacity >> G1SATBCardTableModRefBS::card_shift;
|
return _max_capacity >> G1CardTable::card_shift;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper class that provides functionality to generate the Live Data Count
|
// Helper class that provides functionality to generate the Live Data Count
|
||||||
|
@ -132,7 +132,7 @@ private:
|
||||||
|
|
||||||
void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
|
void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
|
||||||
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
|
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
|
||||||
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
|
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
|
||||||
|
|
||||||
_card_bm.clear_range(start_idx, end_idx);
|
_card_bm.clear_range(start_idx, end_idx);
|
||||||
}
|
}
|
||||||
|
@ -140,7 +140,7 @@ private:
|
||||||
// Mark the card liveness bitmap for the object spanning from start to end.
|
// Mark the card liveness bitmap for the object spanning from start to end.
|
||||||
void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
|
void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
|
||||||
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
|
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
|
||||||
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
|
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
|
||||||
|
|
||||||
assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
|
assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
|
||||||
|
|
||||||
|
@ -168,7 +168,7 @@ public:
|
||||||
// by the card shift -- address 0 corresponds to card number 0. One
|
// by the card shift -- address 0 corresponds to card number 0. One
|
||||||
// must subtract the card num of the bottom of the heap to obtain a
|
// must subtract the card num of the bottom of the heap to obtain a
|
||||||
// card table index.
|
// card table index.
|
||||||
BitMap::idx_t card_num = uintptr_t(addr) >> CardTableModRefBS::card_shift;
|
BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
|
||||||
return card_num - _heap_card_bias;
|
return card_num - _heap_card_bias;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -262,7 +262,7 @@ public:
|
||||||
// Calculate the card number for the bottom of the heap. Used
|
// Calculate the card number for the bottom of the heap. Used
|
||||||
// in biasing indexes into the accounting card bitmaps.
|
// in biasing indexes into the accounting card bitmaps.
|
||||||
_heap_card_bias =
|
_heap_card_bias =
|
||||||
uintptr_t(base_address) >> CardTableModRefBS::card_shift;
|
uintptr_t(base_address) >> G1CardTable::card_shift;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
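The G1CardLiveData helpers above keep one liveness bit per card; as the in-line comment says, a raw card number is the address shifted right by the card shift, and the bitmap index is that number minus the card number of the heap bottom (_heap_card_bias). A compact stand-alone version of that index calculation; the addresses and the 9-bit card shift are illustrative assumptions:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned  card_shift  = 9;             // assumed: 512-byte cards
  const uintptr_t heap_bottom = 0x80000000u;   // hypothetical heap base

  // Bias so that heap_bottom maps to bit 0 of the card bitmap.
  const uintptr_t heap_card_bias = heap_bottom >> card_shift;

  auto card_live_bitmap_index_for = [&](uintptr_t addr) {
    uintptr_t card_num = addr >> card_shift;   // absolute card number
    return card_num - heap_card_bias;          // index into the per-heap bitmap
  };

  assert(card_live_bitmap_index_for(heap_bottom) == 0);
  assert(card_live_bitmap_index_for(heap_bottom + 512) == 1);
  assert(card_live_bitmap_index_for(heap_bottom + 1023) == 1);
  return 0;
}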
src/hotspot/share/gc/g1/g1CardTable.cpp (new file, 102 lines)
|
@ -0,0 +1,102 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "gc/g1/g1CardTable.hpp"
|
||||||
|
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||||
|
#include "gc/shared/memset_with_concurrent_readers.hpp"
|
||||||
|
#include "logging/log.hpp"
|
||||||
|
#include "runtime/atomic.hpp"
|
||||||
|
#include "runtime/orderAccess.inline.hpp"
|
||||||
|
|
||||||
|
bool G1CardTable::mark_card_deferred(size_t card_index) {
|
||||||
|
jbyte val = _byte_map[card_index];
|
||||||
|
// It's already processed
|
||||||
|
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cached bit can be installed either on a clean card or on a claimed card.
|
||||||
|
jbyte new_val = val;
|
||||||
|
if (val == clean_card_val()) {
|
||||||
|
new_val = (jbyte)deferred_card_val();
|
||||||
|
} else {
|
||||||
|
if (val & claimed_card_val()) {
|
||||||
|
new_val = val | (jbyte)deferred_card_val();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (new_val != val) {
|
||||||
|
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
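mark_card_deferred() above bails out if the deferred bit is already visible, computes a new value only for clean or claimed cards, and publishes it with a single compare-exchange whose failure is deliberately ignored: a lost race only costs a duplicate update-buffer entry. A stand-alone model of that wait-free update using std::atomic; the concrete bit encodings are assumptions chosen for the example, not the HotSpot constants:

#include <atomic>
#include <cassert>
#include <cstdint>

namespace model {
  constexpr int8_t clean_card   = -1;   // assumed encodings for the example
  constexpr int8_t dirty_card   = 0;
  constexpr int8_t claimed_bit  = 2;
  constexpr int8_t deferred_bit = 4;

  // Returns false if the card is already deferred; otherwise tries exactly one
  // CAS and returns true whether or not the CAS won (wait-free, no retry loop).
  bool mark_card_deferred(std::atomic<int8_t>& card) {
    int8_t val = card.load(std::memory_order_relaxed);
    if (val != clean_card && (val & deferred_bit) != 0) return false;
    int8_t new_val = val;
    if (val == clean_card)      new_val = deferred_bit;
    else if (val & claimed_bit) new_val = val | deferred_bit;
    if (new_val != val) {
      card.compare_exchange_strong(val, new_val, std::memory_order_relaxed);
    }
    return true;
  }
}

int main() {
  std::atomic<int8_t> card{model::clean_card};
  assert(model::mark_card_deferred(card));    // clean -> deferred
  assert(!model::mark_card_deferred(card));   // already deferred
  card.store(model::dirty_card);
  assert(model::mark_card_deferred(card));    // dirty: no transition, still "handled"
  assert(card.load() == model::dirty_card);
  return 0;
}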
|
||||||
|
|
||||||
|
void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
|
||||||
|
jbyte *const first = byte_for(mr.start());
|
||||||
|
jbyte *const last = byte_after(mr.last());
|
||||||
|
|
||||||
|
memset_with_concurrent_readers(first, g1_young_gen, last - first);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef PRODUCT
|
||||||
|
void G1CardTable::verify_g1_young_region(MemRegion mr) {
|
||||||
|
verify_region(mr, g1_young_gen, true);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
|
||||||
|
// Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
|
||||||
|
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
|
||||||
|
_card_table->clear(mr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
|
||||||
|
mapper->set_mapping_changed_listener(&_listener);
|
||||||
|
|
||||||
|
_byte_map_size = mapper->reserved().byte_size();
|
||||||
|
|
||||||
|
_guard_index = cards_required(_whole_heap.word_size()) - 1;
|
||||||
|
_last_valid_index = _guard_index - 1;
|
||||||
|
|
||||||
|
HeapWord* low_bound = _whole_heap.start();
|
||||||
|
HeapWord* high_bound = _whole_heap.end();
|
||||||
|
|
||||||
|
_cur_covered_regions = 1;
|
||||||
|
_covered[0] = _whole_heap;
|
||||||
|
|
||||||
|
_byte_map = (jbyte*) mapper->reserved().start();
|
||||||
|
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
|
||||||
|
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
|
||||||
|
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
|
||||||
|
|
||||||
|
log_trace(gc, barrier)("G1CardTable::G1CardTable: ");
|
||||||
|
log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
|
||||||
|
p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
|
||||||
|
log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool G1CardTable::is_in_young(oop obj) const {
|
||||||
|
volatile jbyte* p = byte_for(obj);
|
||||||
|
return *p == G1CardTable::g1_young_card_val();
|
||||||
|
}
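g1_mark_as_young() above tags every card intersecting a region by writing the young value from byte_for(mr.start()) up to byte_after(mr.last()), the exclusive end. A stand-alone sketch of that first/last card computation; the 512-byte card, the marker value and the addresses are assumptions:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  const unsigned  card_shift = 9;              // assumed 512-byte cards
  const uintptr_t heap_start = 0;              // simplest possible "heap"
  std::vector<int8_t> byte_map(1024, -1);      // covers 1024 cards

  auto byte_for   = [&](uintptr_t a) { return &byte_map[(a - heap_start) >> card_shift]; };
  auto byte_after = [&](uintptr_t a) { return byte_for(a) + 1; };

  // Mark [start, start+len) as young: memset the inclusive..exclusive card range.
  const int8_t young_val = 0x20;               // assumed marker value
  uintptr_t start = 5 * 512 + 100, len = 3 * 512;
  int8_t* first = byte_for(start);
  int8_t* last  = byte_after(start + len - 1); // mr.last() is the last byte of the region
  std::memset(first, young_val, last - first);

  assert(byte_map[5] == young_val && byte_map[8] == young_val && byte_map[9] == -1);
  return 0;
}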
|
src/hotspot/share/gc/g1/g1CardTable.hpp (new file, 113 lines)
|
@ -0,0 +1,113 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_GC_G1_G1CARDTABLE_HPP
|
||||||
|
#define SHARE_VM_GC_G1_G1CARDTABLE_HPP
|
||||||
|
|
||||||
|
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||||
|
#include "gc/shared/cardTable.hpp"
|
||||||
|
#include "oops/oopsHierarchy.hpp"
|
||||||
|
#include "utilities/macros.hpp"
|
||||||
|
|
||||||
|
class G1CardTable;
|
||||||
|
class G1RegionToSpaceMapper;
|
||||||
|
|
||||||
|
class G1CardTableChangedListener : public G1MappingChangedListener {
|
||||||
|
private:
|
||||||
|
G1CardTable* _card_table;
|
||||||
|
public:
|
||||||
|
G1CardTableChangedListener() : _card_table(NULL) { }
|
||||||
|
|
||||||
|
void set_card_table(G1CardTable* card_table) { _card_table = card_table; }
|
||||||
|
|
||||||
|
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
|
||||||
|
};
|
||||||
|
|
||||||
|
class G1CardTable: public CardTable {
|
||||||
|
friend class VMStructs;
|
||||||
|
friend class G1CardTableChangedListener;
|
||||||
|
|
||||||
|
G1CardTableChangedListener _listener;
|
||||||
|
|
||||||
|
enum G1CardValues {
|
||||||
|
g1_young_gen = CT_MR_BS_last_reserved << 1
|
||||||
|
};
|
||||||
|
|
||||||
|
public:
|
||||||
|
G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
|
||||||
|
_listener.set_card_table(this);
|
||||||
|
}
|
||||||
|
bool is_card_dirty(size_t card_index) {
|
||||||
|
return _byte_map[card_index] == dirty_card_val();
|
||||||
|
}
|
||||||
|
|
||||||
|
static jbyte g1_young_card_val() { return g1_young_gen; }
|
||||||
|
|
||||||
|
/*
|
||||||
|
Claimed and deferred bits are used together in G1 during the evacuation
|
||||||
|
pause. These bits can have the following state transitions:
|
||||||
|
1. The claimed bit can be put over any other card state. Except that
|
||||||
|
the "dirty -> dirty and claimed" transition is checked for in
|
||||||
|
G1 code and is not used.
|
||||||
|
2. Deferred bit can be set only if the previous state of the card
|
||||||
|
was either clean or claimed. mark_card_deferred() is wait-free.
|
||||||
|
We do not care if the operation is successful because if
|
||||||
|
it does not, it will only result in a duplicate entry in the update
|
||||||
|
buffer because of the "cache-miss". So it's not worth spinning.
|
||||||
|
*/
|
||||||
|
|
||||||
|
bool is_card_claimed(size_t card_index) {
|
||||||
|
jbyte val = _byte_map[card_index];
|
||||||
|
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void set_card_claimed(size_t card_index);
|
||||||
|
|
||||||
|
void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
|
||||||
|
void g1_mark_as_young(const MemRegion& mr);
|
||||||
|
|
||||||
|
bool mark_card_deferred(size_t card_index);
|
||||||
|
|
||||||
|
bool is_card_deferred(size_t card_index) {
|
||||||
|
jbyte val = _byte_map[card_index];
|
||||||
|
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
|
||||||
|
}
|
||||||
|
|
||||||
|
static size_t compute_size(size_t mem_region_size_in_words) {
|
||||||
|
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
|
||||||
|
return ReservedSpace::allocation_align_size_up(number_of_slots);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns how many bytes of the heap a single byte of the Card Table corresponds to.
|
||||||
|
static size_t heap_map_factor() { return card_size; }
|
||||||
|
|
||||||
|
void initialize() {}
|
||||||
|
void initialize(G1RegionToSpaceMapper* mapper);
|
||||||
|
|
||||||
|
virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
|
||||||
|
|
||||||
|
virtual bool is_in_young(oop obj) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // SHARE_VM_GC_G1_G1CARDTABLE_HPP
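compute_size() and heap_map_factor() in the header above encode the usual card-table space trade-off: one table byte (slot) per card_size bytes of heap, rounded up to an allocation-aligned reservation. A quick stand-alone version of that sizing arithmetic; the 512-byte card, 8-byte word and 4 KiB alignment are assumptions for the example:

#include <cassert>
#include <cstddef>

int main() {
  const size_t heap_word_size     = 8;     // assumed HeapWord size
  const size_t card_size          = 512;   // assumed heap bytes covered per card-table byte
  const size_t card_size_in_words = card_size / heap_word_size;
  const size_t page               = 4096;  // assumed reservation alignment

  auto compute_size = [&](size_t mem_region_size_in_words) {
    size_t number_of_slots = mem_region_size_in_words / card_size_in_words;
    return (number_of_slots + page - 1) & ~(page - 1);   // align up the reservation
  };

  // A 1 GiB heap needs 2 MiB of card table: one byte per 512 heap bytes.
  size_t heap_bytes = size_t(1) << 30;
  assert(compute_size(heap_bytes / heap_word_size) == (size_t(1) << 21));
  return 0;
}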
|
src/hotspot/share/gc/g1/g1CardTable.inline.hpp (new file, 40 lines)
|
@ -0,0 +1,40 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
|
||||||
|
#define SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
|
||||||
|
|
||||||
|
#include "gc/g1/g1CardTable.hpp"
|
||||||
|
|
||||||
|
void G1CardTable::set_card_claimed(size_t card_index) {
|
||||||
|
jbyte val = _byte_map[card_index];
|
||||||
|
if (val == clean_card_val()) {
|
||||||
|
val = (jbyte)claimed_card_val();
|
||||||
|
} else {
|
||||||
|
val |= (jbyte)claimed_card_val();
|
||||||
|
}
|
||||||
|
_byte_map[card_index] = val;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
|
|
@ -52,6 +52,7 @@
|
||||||
#include "gc/g1/g1RemSet.hpp"
|
#include "gc/g1/g1RemSet.hpp"
|
||||||
#include "gc/g1/g1RootClosures.hpp"
|
#include "gc/g1/g1RootClosures.hpp"
|
||||||
#include "gc/g1/g1RootProcessor.hpp"
|
#include "gc/g1/g1RootProcessor.hpp"
|
||||||
|
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
|
||||||
#include "gc/g1/g1StringDedup.hpp"
|
#include "gc/g1/g1StringDedup.hpp"
|
||||||
#include "gc/g1/g1YCTypes.hpp"
|
#include "gc/g1/g1YCTypes.hpp"
|
||||||
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
|
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
|
||||||
|
@ -103,10 +104,10 @@ class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
|
||||||
private:
|
private:
|
||||||
size_t _num_dirtied;
|
size_t _num_dirtied;
|
||||||
G1CollectedHeap* _g1h;
|
G1CollectedHeap* _g1h;
|
||||||
G1SATBCardTableLoggingModRefBS* _g1_bs;
|
G1CardTable* _g1_ct;
|
||||||
|
|
||||||
HeapRegion* region_for_card(jbyte* card_ptr) const {
|
HeapRegion* region_for_card(jbyte* card_ptr) const {
|
||||||
return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
|
return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
|
||||||
}
|
}
|
||||||
|
|
||||||
bool will_become_free(HeapRegion* hr) const {
|
bool will_become_free(HeapRegion* hr) const {
|
||||||
|
@ -117,14 +118,14 @@ class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
|
||||||
|
|
||||||
public:
|
public:
|
||||||
RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
|
RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
|
||||||
_num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
|
_num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
|
||||||
|
|
||||||
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
||||||
HeapRegion* hr = region_for_card(card_ptr);
|
HeapRegion* hr = region_for_card(card_ptr);
|
||||||
|
|
||||||
// Should only dirty cards in regions that won't be freed.
|
// Should only dirty cards in regions that won't be freed.
|
||||||
if (!will_become_free(hr)) {
|
if (!will_become_free(hr)) {
|
||||||
*card_ptr = CardTableModRefBS::dirty_card_val();
|
*card_ptr = G1CardTable::dirty_card_val();
|
||||||
_num_dirtied++;
|
_num_dirtied++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1465,6 +1466,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
|
||||||
_young_gen_sampling_thread(NULL),
|
_young_gen_sampling_thread(NULL),
|
||||||
_collector_policy(collector_policy),
|
_collector_policy(collector_policy),
|
||||||
_soft_ref_policy(),
|
_soft_ref_policy(),
|
||||||
|
_card_table(NULL),
|
||||||
_memory_manager("G1 Young Generation", "end of minor GC"),
|
_memory_manager("G1 Young Generation", "end of minor GC"),
|
||||||
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
|
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
|
||||||
_eden_pool(NULL),
|
_eden_pool(NULL),
|
||||||
|
@ -1616,11 +1618,13 @@ jint G1CollectedHeap::initialize() {
|
||||||
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
|
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
|
||||||
|
|
||||||
// Create the barrier set for the entire reserved region.
|
// Create the barrier set for the entire reserved region.
|
||||||
G1SATBCardTableLoggingModRefBS* bs
|
G1CardTable* ct = new G1CardTable(reserved_region());
|
||||||
= new G1SATBCardTableLoggingModRefBS(reserved_region());
|
ct->initialize();
|
||||||
|
G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct);
|
||||||
bs->initialize();
|
bs->initialize();
|
||||||
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
|
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
|
||||||
set_barrier_set(bs);
|
set_barrier_set(bs);
|
||||||
|
_card_table = ct;
|
||||||
|
|
||||||
// Create the hot card cache.
|
// Create the hot card cache.
|
||||||
_hot_card_cache = new G1HotCardCache(this);
|
_hot_card_cache = new G1HotCardCache(this);
|
||||||
|
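The initialize() hunk above now builds the G1CardTable first, hands it to the logging barrier set, and keeps a _card_table pointer that later feeds the remembered set instead of going through the barrier set. A toy stand-alone model of that ownership split; all class names here are illustrative stand-ins, not the HotSpot types:

#include <cassert>
#include <memory>

// Toy stand-ins: the card table owns its state, the barrier set only delegates
// to it, and the heap wires the two together during initialization.
struct ToyCardTable {
  bool initialized = false;
  void initialize() { initialized = true; }
};

struct ToyBarrierSet {
  explicit ToyBarrierSet(ToyCardTable* ct) : _card_table(ct) {}
  ToyCardTable* card_table() const { return _card_table; }
private:
  ToyCardTable* _card_table;
};

struct ToyHeap {
  void initialize() {
    _card_table = std::make_unique<ToyCardTable>();
    _card_table->initialize();                 // table is set up before the barrier set
    _barrier_set = std::make_unique<ToyBarrierSet>(_card_table.get());
  }
  ToyCardTable*  card_table()  const { return _card_table.get(); }
  ToyBarrierSet* barrier_set() const { return _barrier_set.get(); }
private:
  std::unique_ptr<ToyCardTable>  _card_table;
  std::unique_ptr<ToyBarrierSet> _barrier_set;
};

int main() {
  ToyHeap heap;
  heap.initialize();
  // Consumers such as the remembered set can now take the card table directly,
  // instead of reaching through the barrier set.
  assert(heap.card_table()->initialized);
  assert(heap.barrier_set()->card_table() == heap.card_table());
  return 0;
}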
@ -1651,8 +1655,8 @@ jint G1CollectedHeap::initialize() {
|
||||||
|
|
||||||
G1RegionToSpaceMapper* cardtable_storage =
|
G1RegionToSpaceMapper* cardtable_storage =
|
||||||
create_aux_memory_mapper("Card Table",
|
create_aux_memory_mapper("Card Table",
|
||||||
G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
|
G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
|
||||||
G1SATBCardTableLoggingModRefBS::heap_map_factor());
|
G1CardTable::heap_map_factor());
|
||||||
|
|
||||||
G1RegionToSpaceMapper* card_counts_storage =
|
G1RegionToSpaceMapper* card_counts_storage =
|
||||||
create_aux_memory_mapper("Card Counts Table",
|
create_aux_memory_mapper("Card Counts Table",
|
||||||
|
@ -1666,7 +1670,7 @@ jint G1CollectedHeap::initialize() {
|
||||||
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
|
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
|
||||||
|
|
||||||
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
||||||
g1_barrier_set()->initialize(cardtable_storage);
|
_card_table->initialize(cardtable_storage);
|
||||||
// Do later initialization work for concurrent refinement.
|
// Do later initialization work for concurrent refinement.
|
||||||
_hot_card_cache->initialize(card_counts_storage);
|
_hot_card_cache->initialize(card_counts_storage);
|
||||||
|
|
||||||
|
@ -1676,7 +1680,7 @@ jint G1CollectedHeap::initialize() {
|
||||||
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
|
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
|
||||||
|
|
||||||
// Also create a G1 rem set.
|
// Also create a G1 rem set.
|
||||||
_g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
|
_g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
|
||||||
_g1_rem_set->initialize(max_capacity(), max_regions());
|
_g1_rem_set->initialize(max_capacity(), max_regions());
|
||||||
|
|
||||||
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
|
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
|
||||||
|
@ -2691,17 +2695,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
|
||||||
if (!r->rem_set()->is_empty()) {
|
if (!r->rem_set()->is_empty()) {
|
||||||
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
|
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
|
||||||
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
|
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
|
||||||
G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
|
G1CardTable* ct = g1h->card_table();
|
||||||
HeapRegionRemSetIterator hrrs(r->rem_set());
|
HeapRegionRemSetIterator hrrs(r->rem_set());
|
||||||
size_t card_index;
|
size_t card_index;
|
||||||
while (hrrs.has_next(card_index)) {
|
while (hrrs.has_next(card_index)) {
|
||||||
jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
|
jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
|
||||||
// The remembered set might contain references to already freed
|
// The remembered set might contain references to already freed
|
||||||
// regions. Filter out such entries to avoid failing card table
|
// regions. Filter out such entries to avoid failing card table
|
||||||
// verification.
|
// verification.
|
||||||
if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
|
if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
|
||||||
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
|
if (*card_ptr != G1CardTable::dirty_card_val()) {
|
||||||
*card_ptr = CardTableModRefBS::dirty_card_val();
|
*card_ptr = G1CardTable::dirty_card_val();
|
||||||
_dcq.enqueue(card_ptr);
|
_dcq.enqueue(card_ptr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,6 +28,7 @@
|
||||||
#include "gc/g1/evacuationInfo.hpp"
|
#include "gc/g1/evacuationInfo.hpp"
|
||||||
#include "gc/g1/g1AllocationContext.hpp"
|
#include "gc/g1/g1AllocationContext.hpp"
|
||||||
#include "gc/g1/g1BiasedArray.hpp"
|
#include "gc/g1/g1BiasedArray.hpp"
|
||||||
|
#include "gc/g1/g1CardTable.hpp"
|
||||||
#include "gc/g1/g1CollectionSet.hpp"
|
#include "gc/g1/g1CollectionSet.hpp"
|
||||||
#include "gc/g1/g1CollectorState.hpp"
|
#include "gc/g1/g1CollectorState.hpp"
|
||||||
#include "gc/g1/g1ConcurrentMark.hpp"
|
#include "gc/g1/g1ConcurrentMark.hpp"
|
||||||
|
@ -150,6 +151,7 @@ private:
|
||||||
|
|
||||||
WorkGang* _workers;
|
WorkGang* _workers;
|
||||||
G1CollectorPolicy* _collector_policy;
|
G1CollectorPolicy* _collector_policy;
|
||||||
|
G1CardTable* _card_table;
|
||||||
|
|
||||||
SoftRefPolicy _soft_ref_policy;
|
SoftRefPolicy _soft_ref_policy;
|
||||||
|
|
||||||
|
@ -1178,6 +1180,10 @@ public:
|
||||||
|
|
||||||
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
|
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
|
||||||
|
|
||||||
|
G1CardTable* card_table() const {
|
||||||
|
return _card_table;
|
||||||
|
}
|
||||||
|
|
||||||
// Iteration functions.
|
// Iteration functions.
|
||||||
|
|
||||||
// Iterate over all objects, calling "cl.do_object" on each.
|
// Iterate over all objects, calling "cl.do_object" on each.
|
||||||
|
|
|
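The hunks above move card-table ownership out of the barrier set: G1CollectedHeap now constructs a G1CardTable first, hands it to the barrier set, and publishes it through the new card_table() accessor. A self-contained toy sketch of that ownership split (stand-in types, not the HotSpot classes, assuming the barrier set only keeps a pointer to the table):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy stand-ins for G1CardTable, G1SATBCardTableLoggingModRefBS and G1CollectedHeap.
struct ToyCardTable {
  std::vector<int8_t> bytes;                        // one byte per card
  void initialize(std::size_t cards) { bytes.assign(cards, -1); }
};

struct ToyBarrierSet {
  ToyCardTable* _card_table;                        // uses the table, does not own it
  explicit ToyBarrierSet(ToyCardTable* ct) : _card_table(ct) {}
};

struct ToyHeap {
  ToyCardTable* _card_table = nullptr;
  ToyBarrierSet* _barrier_set = nullptr;
  void initialize(std::size_t cards) {
    _card_table = new ToyCardTable();               // the heap owns the card table...
    _card_table->initialize(cards);
    _barrier_set = new ToyBarrierSet(_card_table);  // ...and wraps it in the barrier set
  }
  ToyCardTable* card_table() const { return _card_table; }  // mirrors the new accessor
};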
@@ -123,7 +123,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

MemRegion mr(start, end);
- g1_barrier_set()->g1_mark_as_young(mr);
+ card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -38,12 +38,12 @@
class UpdateRSetDeferred : public ExtendedOopClosure {
private:
G1CollectedHeap* _g1;
- DirtyCardQueue *_dcq;
- G1SATBCardTableModRefBS* _ct_bs;
+ DirtyCardQueue* _dcq;
+ G1CardTable* _ct;

public:
UpdateRSetDeferred(DirtyCardQueue* dcq) :
- _g1(G1CollectedHeap::heap()), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
+ _g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}

virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }

@@ -59,9 +59,9 @@ public:
if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
return;
}
- size_t card_index = _ct_bs->index_for(p);
- if (_ct_bs->mark_card_deferred(card_index)) {
-   _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+ size_t card_index = _ct->index_for(p);
+ if (_ct->mark_card_deferred(card_index)) {
+   _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
}
}
};

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -112,7 +112,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(Heap
hr->reset_gc_time_stamp();
hr->rem_set()->clear();

- _g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
+ _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));

if (_g1h->g1_hot_card_cache()->use_cache()) {
_g1h->g1_hot_card_cache()->reset_card_counts(hr);

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -604,10 +604,9 @@ void G1HeapVerifier::verify_after_gc(G1VerifyType type) {
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1HeapVerifier* _verifier;
- G1SATBCardTableModRefBS* _ct_bs;
public:
- G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
-   : _verifier(verifier), _ct_bs(ct_bs) { }
+ G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
+   : _verifier(verifier) { }
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);

@@ -620,16 +619,16 @@ public:

void G1HeapVerifier::verify_card_table_cleanup() {
if (G1VerifyCTCleanup || VerifyAfterGC) {
- G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
+ G1VerifyCardTableCleanup cleanup_verifier(this);
_g1h->heap_region_iterate(&cleanup_verifier);
}
}

void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->end());
- ct_bs->verify_not_dirty_region(mr);
+ ct->verify_not_dirty_region(mr);
}

void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {

@@ -640,12 +639,12 @@ void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
if (hr->is_young()) {
- ct_bs->verify_g1_young_region(mr);
+ ct->verify_g1_young_region(mr);
} else {
- ct_bs->verify_dirty_region(mr);
+ ct->verify_dirty_region(mr);
}
}

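Several of the closures touched by this change (UpdateRSetDeferred above, G1ParScanThreadState below) share one pattern: map an address to a card index, mark the card deferred exactly once, and only then enqueue it for later remembered-set update. A compilable toy sketch of that pattern, assuming 512-byte cards and a plain array in place of the real card table:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int         kCardShift   = 9;      // 512-byte cards, the usual HotSpot value
constexpr uint8_t     kDeferredBit = 1;      // toy encoding, not the real card values
constexpr std::size_t kNumCards    = 1024;   // toy table size

struct ToyDeferredMarker {
  std::atomic<uint8_t> cards[kNumCards] = {};
  std::vector<std::size_t> queue;            // stands in for the DirtyCardQueue

  std::size_t index_for(uintptr_t addr, uintptr_t heap_base) const {
    return (addr - heap_base) >> kCardShift; // address -> card index
  }

  bool mark_card_deferred(std::size_t idx) {
    uint8_t old = cards[idx].fetch_or(kDeferredBit);
    return (old & kDeferredBit) == 0;        // true only for the first marker
  }

  void record(uintptr_t addr, uintptr_t heap_base) {
    std::size_t idx = index_for(addr, heap_base);
    if (mark_card_deferred(idx)) {           // mirrors _ct->mark_card_deferred(card_index)
      queue.push_back(idx);                  // mirrors _dcq->enqueue(byte_for_index(card_index))
    }
  }
};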
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -40,7 +40,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
: _g1h(g1h),
_refs(g1h->task_queue(worker_id)),
_dcq(&g1h->dirty_card_queue_set()),
- _ct_bs(g1h->g1_barrier_set()),
+ _ct(g1h->card_table()),
_closures(NULL),
_hash_seed(17),
_worker_id(worker_id),

@@ -390,7 +390,6 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
return forward_ptr;
}
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
_g1h(g1h),
_states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -45,7 +45,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
- G1SATBCardTableModRefBS* _ct_bs;
+ G1CardTable* _ct;
G1EvacuationRootClosures* _closures;

G1PLABAllocator* _plab_allocator;

@@ -72,7 +72,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

DirtyCardQueue& dirty_card_queue() { return _dcq; }
- G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
+ G1CardTable* ct() { return _ct; }

InCSetState dest(InCSetState original) const {
assert(original.is_valid(),

@@ -104,10 +104,10 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
// If the field originates from the to-space, we don't need to include it
// in the remembered set updates.
if (!from->is_young()) {
- size_t card_index = ctbs()->index_for(p);
+ size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
- if (ctbs()->mark_card_deferred(card_index)) {
-   dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+ if (ct()->mark_card_deferred(card_index)) {
+   dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
}
}
}

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"

@@ -74,7 +75,7 @@ private:
static size_t chunk_size() { return M; }

void work(uint worker_id) {
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();

while (_cur_dirty_regions < _num_dirty_regions) {
size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;

@@ -83,7 +84,7 @@ private:
for (size_t i = next; i < max; i++) {
HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
if (!r->is_survivor()) {
- ct_bs->clear(MemRegion(r->bottom(), r->end()));
+ ct->clear(MemRegion(r->bottom(), r->end()));
}
}
}

@@ -280,12 +281,12 @@ public:
};

G1RemSet::G1RemSet(G1CollectedHeap* g1,
- CardTableModRefBS* ct_bs,
+ G1CardTable* ct,
G1HotCardCache* hot_card_cache) :
_g1(g1),
_scan_state(new G1RemSetScanState()),
_num_conc_refined_cards(0),
- _ct_bs(ct_bs),
+ _ct(ct),
_g1p(_g1->g1_policy()),
_hot_card_cache(hot_card_cache),
_prev_period_summary() {

@@ -328,7 +329,7 @@ G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state
_worker_i(worker_i) {
_g1h = G1CollectedHeap::heap();
_bot = _g1h->bot();
- _ct_bs = _g1h->g1_barrier_set();
+ _ct = _g1h->card_table();
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {

@@ -345,7 +346,7 @@ void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
}

void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
- _ct_bs->set_card_claimed(card_index);
+ _ct->set_card_claimed(card_index);
_scan_state->add_dirty_region(region_idx_for_card);
}

@@ -381,7 +382,7 @@ bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
_cards_claimed++;

// If the card is dirty, then G1 will scan it during Update RS.
- if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
+ if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
continue;
}

@@ -535,15 +536,15 @@ void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
_g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
}

- inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
+ inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
G1CollectedHeap* g1 = G1CollectedHeap::heap();
- assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
+ assert(g1->is_in_exact(ct->addr_for(card_ptr)),
"Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
p2i(card_ptr),
- ct_bs->index_for(ct_bs->addr_for(card_ptr)),
- p2i(ct_bs->addr_for(card_ptr)),
- g1->addr_to_region(ct_bs->addr_for(card_ptr)));
+ ct->index_for(ct->addr_for(card_ptr)),
+ p2i(ct->addr_for(card_ptr)),
+ g1->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

@@ -551,15 +552,15 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
uint worker_i) {
assert(!_g1->is_gc_active(), "Only call concurrently");

- check_card_ptr(card_ptr, _ct_bs);
+ check_card_ptr(card_ptr, _ct);

// If the card is no longer dirty, nothing to do.
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
return;
}

// Construct the region representing the card.
- HeapWord* start = _ct_bs->addr_for(card_ptr);
+ HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);

@@ -619,7 +620,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
return;
} else if (card_ptr != orig_card_ptr) {
// Original card was inserted and an old card was evicted.
- start = _ct_bs->addr_for(card_ptr);
+ start = _ct->addr_for(card_ptr);
r = _g1->heap_region_containing(start);

// Check whether the region formerly in the cache should be

@@ -654,7 +655,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Okay to clean and process the card now. There are still some
// stale card cases that may be detected by iteration and dealt with
// as iteration failure.
- *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
+ *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

// This fence serves two purposes. First, the card must be cleaned
// before processing the contents. Second, we can't proceed with

@@ -666,7 +667,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,

// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
- HeapWord* end = start + CardTableModRefBS::card_size_in_words;
+ HeapWord* end = start + G1CardTable::card_size_in_words;
MemRegion dirty_region(start, MIN2(scan_limit, end));
assert(!dirty_region.is_empty(), "sanity");

@@ -683,8 +684,8 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
if (!card_processed) {
// The card might have gotten re-dirtied and re-enqueued while we
// worked. (In fact, it's pretty likely.)
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-   *card_ptr = CardTableModRefBS::dirty_card_val();
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
+   *card_ptr = G1CardTable::dirty_card_val();
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
DirtyCardQueue* sdcq =
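The refine_card_concurrently hunks above keep a fixed order of operations: clean the card, issue a StoreLoad-style fence, scan the covered words, and re-dirty plus re-enqueue the card if the scan could not complete. A toy, single-file sketch of that ordering, with std::atomic standing in for the card byte and the fence, and the scan reduced to a callback:

#include <atomic>
#include <functional>

enum : int8_t { kToyClean = -1, kToyDirty = 0 };   // toy values; the real encodings live in the card table

// Returns true if the card was processed, false if it was skipped or left for a retry.
bool refine_card_sketch(std::atomic<int8_t>& card,
                        const std::function<bool()>& scan,
                        const std::function<void(std::atomic<int8_t>*)>& enqueue) {
  if (card.load() != kToyDirty) {
    return false;                                  // no longer dirty: nothing to do
  }
  card.store(kToyClean);                           // clean before processing the contents...
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ...separated by a StoreLoad-style fence
  bool processed = scan();                         // scan the heap words covered by this card
  if (!processed && card.load() != kToyDirty) {
    card.store(kToyDirty);                         // could not finish: re-dirty and re-enqueue
    enqueue(&card);
  }
  return processed;
}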
@@ -700,20 +701,20 @@ bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
assert(_g1->is_gc_active(), "Only call during GC");

- check_card_ptr(card_ptr, _ct_bs);
+ check_card_ptr(card_ptr, _ct);

// If the card is no longer dirty, nothing to do. This covers cards that were already
// scanned as parts of the remembered sets.
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
return false;
}

// We claim lazily (so races are possible but they're benign), which reduces the
// number of potential duplicate scans (multiple threads may enqueue the same card twice).
- *card_ptr = CardTableModRefBS::clean_card_val() | CardTableModRefBS::claimed_card_val();
+ *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

// Construct the region representing the card.
- HeapWord* card_start = _ct_bs->addr_for(card_ptr);
+ HeapWord* card_start = _ct->addr_for(card_ptr);
// And find the region containing it.
uint const card_region_idx = _g1->addr_to_region(card_start);

@@ -726,7 +727,7 @@ bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,

// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
- HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
+ HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
assert(!dirty_region.is_empty(), "sanity");

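refine_card_during_gc above claims a card with a plain store rather than an atomic operation, so two workers can both see the card as dirty and both scan it; the duplicate scan is accepted as the cheaper trade-off. A minimal sketch of that idea, with made-up card values that do not match the real encoding:

#include <cstdint>

constexpr int8_t kSketchDirty   = 0;     // assumed toy values only
constexpr int8_t kSketchClaimed = 0x0A;  // stands in for clean_card_val() | claimed_card_val()

// Lazy claim: a non-atomic check-then-store, so a racing worker may also pass the
// check and scan the same card; the race only costs duplicate work, never missed work.
inline bool claim_card_lazily_sketch(volatile int8_t* card_ptr) {
  if (*card_ptr != kSketchDirty) {
    return false;                        // already scanned or already claimed
  }
  *card_ptr = kSketchClaimed;
  return true;
}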
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -27,6 +27,7 @@

#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardLiveData.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"

@@ -72,7 +73,7 @@ private:
G1CollectedHeap* _g1;
size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.

- CardTableModRefBS* _ct_bs;
+ G1CardTable* _ct;
G1Policy* _g1p;
G1HotCardCache* _hot_card_cache;

@@ -93,7 +94,7 @@ public:
void cleanupHRRS();

G1RemSet(G1CollectedHeap* g1,
- CardTableModRefBS* ct_bs,
+ G1CardTable* ct,
G1HotCardCache* hot_card_cache);
~G1RemSet();

@@ -162,7 +163,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
CodeBlobClosure* _code_root_cl;

G1BlockOffsetTable* _bot;
- G1SATBCardTableModRefBS *_ct_bs;
+ G1CardTable *_ct;

double _strong_code_root_scan_time_sec;
uint _worker_i;

@@ -23,22 +23,20 @@
*/

#include "precompiled.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
-#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
- MemRegion whole_heap,
+ G1CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
- CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
+ CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
{ }

void G1SATBCardTableModRefBS::enqueue(oop pre_val) {

@@ -80,88 +78,17 @@ void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, boo
}
}

-bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  // It's already processed
-  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
-    return false;
-  }
-
-  // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
-  if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
-  } else {
-    if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
-    }
-  }
-  if (new_val != val) {
-    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-  }
-  return true;
-}
-
-void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
-  jbyte *const first = byte_for(mr.start());
-  jbyte *const last = byte_after(mr.last());
-
-  memset_with_concurrent_readers(first, g1_young_gen, last - first);
-}
-
-#ifndef PRODUCT
-void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
-  verify_region(mr, g1_young_gen, true);
-}
-#endif
-
-void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
-  // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
-  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
-  _card_table->clear(mr);
-}
-
G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
-  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
-  _dcqs(JavaThread::dirty_card_queue_set()),
-  _listener()
-{
-  _listener.set_card_table(this);
-}
-
-void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
-  initialize_deferred_card_mark_barriers();
-  mapper->set_mapping_changed_listener(&_listener);
-
-  _byte_map_size = mapper->reserved().byte_size();
-
-  _guard_index = cards_required(_whole_heap.word_size()) - 1;
-  _last_valid_index = _guard_index - 1;
-
-  HeapWord* low_bound = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-
-  _cur_covered_regions = 1;
-  _covered[0] = _whole_heap;
-
-  _byte_map = (jbyte*) mapper->reserved().start();
-  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
-  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
-  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
-  log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
-  log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
-}
+G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
+  G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
+  _dcqs(JavaThread::dirty_card_queue_set()) {}

void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
// In the slow path, we know a card is not young
- assert(*byte != g1_young_gen, "slow path invoked without filtering");
+ assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
OrderAccess::storeload();
- if (*byte != dirty_card) {
-   *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+   *byte = G1CardTable::dirty_card_val();
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;

@@ -174,16 +101,15 @@ void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* b
}
}

-void
-G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
+void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
if (mr.is_empty()) {
return;
}
- volatile jbyte* byte = byte_for(mr.start());
- jbyte* last_byte = byte_for(mr.last());
+ volatile jbyte* byte = _card_table->byte_for(mr.start());
+ jbyte* last_byte = _card_table->byte_for(mr.last());
Thread* thr = Thread::current();
// skip all consecutive young cards
- for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+ for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);

if (byte <= last_byte) {
OrderAccess::storeload();

@@ -191,11 +117,11 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
- if (*byte == g1_young_gen) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
- if (*byte != dirty_card) {
-   *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+   *byte = G1CardTable::dirty_card_val();
jt->dirty_card_queue().enqueue(byte);
}
}

@@ -203,11 +129,11 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
- if (*byte == g1_young_gen) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
- if (*byte != dirty_card) {
-   *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+   *byte = G1CardTable::dirty_card_val();
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}

@@ -215,11 +141,6 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
}
}

-bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
-  volatile jbyte* p = byte_for((void*)obj);
-  return *p == g1_young_card_val();
-}
-
void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
// This method initializes the SATB and dirty card queues before a
// JavaThread is added to the Java thread list. Right now, we don't

@@ -33,6 +33,8 @@

class DirtyCardQueueSet;
class G1SATBCardTableLoggingModRefBS;
+class CardTable;
+class G1CardTable;

// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.

@@ -40,16 +42,10 @@ class G1SATBCardTableLoggingModRefBS;
class G1SATBCardTableModRefBS: public CardTableModRefBS {
friend class VMStructs;
protected:
- enum G1CardValues {
-   g1_young_gen = CT_MR_BS_last_reserved << 1
- };
-
- G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
+ G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
~G1SATBCardTableModRefBS() { }

public:
- static int g1_young_card_val() { return g1_young_gen; }
-
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);

@@ -62,38 +58,6 @@ public:

template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);

-  /*
-     Claimed and deferred bits are used together in G1 during the evacuation
-     pause. These bits can have the following state transitions:
-     1. The claimed bit can be put over any other card state. Except that
-        the "dirty -> dirty and claimed" transition is checked for in
-        G1 code and is not used.
-     2. Deferred bit can be set only if the previous state of the card
-        was either clean or claimed. mark_card_deferred() is wait-free.
-        We do not care if the operation is be successful because if
-        it does not it will only result in duplicate entry in the update
-        buffer because of the "cache-miss". So it's not worth spinning.
-   */
-
-  bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
-  }
-
-  inline void set_card_claimed(size_t card_index);
-
-  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
-  void g1_mark_as_young(const MemRegion& mr);
-
-  bool mark_card_deferred(size_t card_index);
-
-  bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
-  }
-
-  virtual bool is_in_young(oop obj) const;
};

template<>

@@ -106,42 +70,14 @@ struct BarrierSet::GetType<BarrierSet::G1SATBCT> {
typedef G1SATBCardTableModRefBS type;
};

-class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
- private:
-  G1SATBCardTableLoggingModRefBS* _card_table;
- public:
-  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
-
-  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
-
-  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
-};
-
// Adds card-table logging to the post-barrier.
// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
- friend class G1SATBCardTableLoggingModRefBSChangedListener;
private:
- G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs;

public:
- static size_t compute_size(size_t mem_region_size_in_words) {
-   size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
-   return ReservedSpace::allocation_align_size_up(number_of_slots);
- }
-
- // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
- static size_t heap_map_factor() {
-   return CardTableModRefBS::card_size;
- }
-
- G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
-
- virtual void initialize() { }
- virtual void initialize(G1RegionToSpaceMapper* mapper);
-
- virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+ G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);

// NB: if you do a whole-heap invalidation, the "usual invariant" defined
// above no longer applies.

@@ -157,10 +93,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);

- virtual bool card_mark_must_follow_store() const {
-   return true;
- }
-
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {

@@ -25,8 +25,9 @@
#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP

-#include "gc/shared/accessBarrierSupport.inline.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/shared/accessBarrierSupport.inline.hpp"

template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {

@@ -43,23 +44,13 @@ inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {

template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
- volatile jbyte* byte = byte_for(field);
- if (*byte != g1_young_gen) {
+ volatile jbyte* byte = _card_table->byte_for(field);
+ if (*byte != G1CardTable::g1_young_card_val()) {
// Take a slow path for cards in old
write_ref_field_post_slow(byte);
}
}

-void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  if (val == clean_card_val()) {
-    val = (jbyte)claimed_card_val();
-  } else {
-    val |= (jbyte)claimed_card_val();
-  }
-  _byte_map[card_index] = val;
-}
-
inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
// Archive roots need to be enqueued since they add subgraphs to the

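The write_ref_field_post / write_ref_field_post_slow pair above keeps the G1 post-barrier fast path to a single byte compare: young cards are filtered out, and only old cards are dirtied and logged. A self-contained sketch of that filter, with toy card values and a vector standing in for the dirty card queue:

#include <cstdint>
#include <vector>

constexpr int8_t kToyYoungCard = 32;   // toy stand-in for g1_young_card_val()
constexpr int8_t kToyDirtyCard = 0;    // toy stand-in for dirty_card_val()

struct ToyPostBarrier {
  std::vector<volatile int8_t*> dirty_card_queue;

  void write_ref_field_post(volatile int8_t* byte) {
    if (*byte != kToyYoungCard) {      // fast path: young cards never need refinement
      write_ref_field_post_slow(byte);
    }
  }

  void write_ref_field_post_slow(volatile int8_t* byte) {
    // (the real slow path also issues a StoreLoad fence before re-checking the byte)
    if (*byte != kToyDirtyCard) {      // dirty once, enqueue once
      *byte = kToyDirtyCard;
      dirty_card_queue.push_back(byte);
    }
  }
};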
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -100,7 +100,7 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

guarantee(CardsPerRegion == 0, "we should only set it once");
- CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
+ CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

if (G1HeapRegionSize != GrainBytes) {
FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);

@@ -139,9 +139,8 @@ void HeapRegion::par_clear() {
assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
- CardTableModRefBS* ct_bs =
-   barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
- ct_bs->clear(MemRegion(bottom(), end()));
+ G1CardTable* ct = G1CollectedHeap::heap()->card_table();
+ ct->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {

@@ -463,7 +462,7 @@ void HeapRegion::print_on(outputStream* st) const {
class G1VerificationClosure : public OopClosure {
protected:
G1CollectedHeap* _g1h;
- CardTableModRefBS* _bs;
+ G1CardTable *_ct;
oop _containing_obj;
bool _failures;
int _n_failures;

@@ -473,7 +472,7 @@ public:
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
- _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
+ _g1h(g1h), _ct(g1h->card_table()),
_containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
}

@@ -576,9 +575,9 @@ public:
if (from != NULL && to != NULL &&
from != to &&
!to->is_pinned()) {
- jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
- jbyte cv_field = *_bs->byte_for_const(p);
- const jbyte dirty = CardTableModRefBS::dirty_card_val();
+ jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
+ jbyte cv_field = *_ct->byte_for_const(p);
+ const jbyte dirty = G1CardTable::dirty_card_val();

bool is_bad = !(from->is_young()
|| to->rem_set()->contains_reference(p)

@@ -834,7 +833,6 @@ void G1ContiguousSpace::clear(bool mangle_space) {
CompactibleSpace::clear(mangle_space);
reset_bot();
}

#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
mangle_unused_area_complete();

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -32,6 +32,7 @@
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "utilities/macros.hpp"


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@ protected:
     if (loc_hr->is_in_reserved(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
       CardIdx_t from_card = (CardIdx_t)
-          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
+          hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
 
       assert((size_t)from_card < HeapRegion::CardsPerRegion,
              "Must be in range.");
@@ -170,7 +170,7 @@ public:
   bool contains_reference(OopOrNarrowOopStar from) const {
     assert(hr()->is_in_reserved(from), "Precondition.");
     size_t card_ind = pointer_delta(from, hr()->bottom(),
-                                    CardTableModRefBS::card_size);
+                                    G1CardTable::card_size);
     return _bm.at(card_ind);
   }
 
@@ -354,7 +354,7 @@ void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = _hr->hrm_index();
 
-  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
+  int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
 
   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@@ -382,7 +382,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
 
     uintptr_t from_hr_bot_card_index =
       uintptr_t(from_hr->bottom())
-        >> CardTableModRefBS::card_shift;
+        >> G1CardTable::card_shift;
     CardIdx_t card_index = from_card - from_hr_bot_card_index;
     assert((size_t)card_index < HeapRegion::CardsPerRegion,
            "Must be in range.");
@@ -671,9 +671,9 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
 
   } else {
     uintptr_t from_card =
-      (uintptr_t(from) >> CardTableModRefBS::card_shift);
+      (uintptr_t(from) >> G1CardTable::card_shift);
     uintptr_t hr_bot_card_index =
-      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
+      uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
     CardIdx_t card_index = from_card - hr_bot_card_index;
     assert((size_t)card_index < HeapRegion::CardsPerRegion,
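The hunks above only swap CardTableModRefBS::card_shift and card_size for the G1CardTable equivalents; the underlying arithmetic that maps a heap address to a card index within a region is unchanged. The following standalone sketch illustrates that arithmetic with assumed constants (card_shift = 9, i.e. 512-byte cards, and a 1 MB region used as an example); the names here are illustrative stand-ins, not the HotSpot declarations.

#include <cstdint>
#include <cstdio>

// Illustrative constants: HotSpot card tables use 512-byte cards,
// so card_shift is 9; the region size here is just an example.
static const int    card_shift  = 9;
static const size_t card_size   = size_t(1) << card_shift;   // 512 bytes
static const size_t region_size = size_t(1) << 20;           // 1 MB "heap region"

// Card index of an address within a region, mirroring the pattern in the diff:
//   from_card  = uintptr_t(from) >> card_shift;
//   card_index = from_card - (uintptr_t(bottom) >> card_shift);
static size_t card_index_in_region(uintptr_t addr, uintptr_t region_bottom) {
  uintptr_t from_card   = addr          >> card_shift;
  uintptr_t bottom_card = region_bottom >> card_shift;
  return size_t(from_card - bottom_card);
}

int main() {
  uintptr_t bottom = 0x100000;                     // assumed region start (card-aligned)
  uintptr_t addr   = bottom + 3 * card_size + 40;  // some address inside the region

  size_t idx = card_index_in_region(addr, bottom);
  size_t cards_per_region = region_size / card_size;

  // The index must stay below the number of cards in the region,
  // which is what the asserts in the hunks above check.
  std::printf("card index = %zu (cards per region = %zu)\n", idx, cards_per_region);
  return idx < cards_per_region ? 0 : 1;
}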

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@ void SparsePRTEntry::init(RegionIdx_t region_ind) {
   // Check that the card array element type can represent all cards in the region.
   // Choose a large SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required.
   assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) *
-         G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition");
+         G1CardTable::card_size >= HeapRegionBounds::max_size(), "precondition");
   assert(G1RSetSparseRegionEntries > 0, "precondition");
   _region_ind = region_ind;
   _next_index = RSHashTable::NullEntry;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -509,7 +509,7 @@ void ASPSYoungGen::reset_after_change() {
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);
 
   space_invariants();
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 void ObjectStartArray::initialize(MemRegion reserved_region) {
   // We're based on the assumption that we use the same
   // size blocks as the card table.
-  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
+  assert((int)block_size == (int)CardTable::card_size, "Sanity");
   assert((int)block_size <= 512, "block_size must be less than or equal to 512");
 
   // Calculate how much space must be reserved
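The assert above now compares block_size against CardTable::card_size instead of CardTableModRefBS::card_size, but the invariant it enforces is the same: the object start array uses exactly the card table's granularity, so one index derived from an address can be used against both tables. A minimal sketch of that shared granularity, with assumed values (512-byte blocks) and illustrative names rather than the HotSpot ones.

#include <cstdint>
#include <cstdio>

// Assumed granularity shared by the card table and the object start array.
static const size_t block_size  = 512;
static const int    block_shift = 9;   // log2(block_size)

// Index of the card / block covering an address, relative to a covered base.
static size_t entry_index(uintptr_t addr, uintptr_t covered_base) {
  return size_t((addr - covered_base) >> block_shift);
}

int main() {
  uintptr_t base = 0x200000;             // assumed start of the covered region
  uintptr_t addr = base + 5 * block_size + 100;

  // Because both tables use the same granularity, this one index addresses
  // both the card byte and the object-start entry covering the address.
  size_t idx = entry_index(addr, base);
  std::printf("card/block index for address = %zu\n", idx);
  return 0;
}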

@@ -26,7 +26,6 @@
 #include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
@@ -70,7 +69,9 @@ jint ParallelScavengeHeap::initialize() {
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
+  PSCardTable* card_table = new PSCardTable(reserved_region());
+  card_table->initialize();
+  CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
   barrier_set->initialize();
   set_barrier_set(barrier_set);
 
@@ -625,6 +626,14 @@ ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   return (ParallelScavengeHeap*)heap;
 }
 
+CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
+  return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
+}
+
+PSCardTable* ParallelScavengeHeap::card_table() {
+  return static_cast<PSCardTable*>(barrier_set()->card_table());
+}
+
 // Before delegating the resize to the young generation,
 // the reserved space for the young and old generations
 // may be changed to accommodate the desired resize.
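The initialize() hunk above changes the construction order: the heap now builds a PSCardTable first, initializes it, and only then hands it to a CardTableModRefBS barrier set, instead of instantiating the old CardTableExtension barrier set directly. A rough standalone sketch of that ownership layering follows; the class bodies and names (CardTableSketch, CardTableBarrierSetSketch) are stand-ins for illustration, not the HotSpot definitions.

#include <cstdio>
#include <memory>

// Stand-in for the card table: owns the byte map covering the heap.
class CardTableSketch {
 public:
  void initialize()            { std::puts("card table: byte map committed"); }
  void resize_covered_region() { std::puts("card table: covered region resized"); }
};

// Stand-in for the barrier set: forwards write-barrier work to the card table
// it wraps, mirroring CardTableModRefBS(card_table) in the hunk above.
class CardTableBarrierSetSketch {
 public:
  explicit CardTableBarrierSetSketch(CardTableSketch* ct) : _card_table(ct) {}
  void initialize()               { std::puts("barrier set: initialized"); }
  CardTableSketch* card_table() const { return _card_table; }
 private:
  CardTableSketch* _card_table;
};

int main() {
  // Mirrors the new order in ParallelScavengeHeap::initialize():
  // build and initialize the card table, then wrap it in the barrier set.
  auto card_table = std::make_unique<CardTableSketch>();
  card_table->initialize();

  CardTableBarrierSetSketch barrier_set(card_table.get());
  barrier_set.initialize();

  // Later code reaches the card table through the barrier set,
  // as ParallelScavengeHeap::card_table() now does.
  barrier_set.card_table()->resize_covered_region();
  return 0;
}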

@@ -30,6 +30,7 @@
 #include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -46,6 +47,7 @@ class GCTaskManager;
 class MemoryManager;
 class MemoryPool;
 class PSAdaptiveSizePolicy;
+class PSCardTable;
 class PSHeapSummary;
 
 class ParallelScavengeHeap : public CollectedHeap {
@@ -125,6 +127,9 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
 
+  CardTableModRefBS* barrier_set();
+  PSCardTable* card_table();
+
   AdjoiningGenerations* gens() { return _gens; }
 
   // Returns JNI_OK on success

@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"
@@ -39,9 +39,9 @@
 // may be either dirty or newgen.
 class CheckForUnmarkedOops : public OopClosure {
  private:
   PSYoungGen* _young_gen;
-  CardTableExtension* _card_table;
+  PSCardTable* _card_table;
   HeapWord* _unmarked_addr;
 
  protected:
   template <class T> void do_oop_work(T* p) {
@@ -56,7 +56,7 @@ class CheckForUnmarkedOops : public OopClosure {
   }
 
  public:
-  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
+  CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) :
     _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
 
   virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
@@ -71,16 +71,14 @@ class CheckForUnmarkedOops : public OopClosure {
 // precise or imprecise, dirty or newgen.
 class CheckForUnmarkedObjects : public ObjectClosure {
  private:
   PSYoungGen* _young_gen;
-  CardTableExtension* _card_table;
+  PSCardTable* _card_table;
 
  public:
   CheckForUnmarkedObjects() {
     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     _young_gen = heap->young_gen();
-    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
-    // No point in asserting barrier set type here. Need to make CardTableExtension
-    // a unique barrier set type.
+    _card_table = heap->card_table();
   }
 
   // Card marks are not precise. The current system can leave us with
@@ -99,8 +97,8 @@ class CheckForUnmarkedObjects : public ObjectClosure {
 // Checks for precise marking of oops as newgen.
 class CheckForPreciseMarks : public OopClosure {
  private:
   PSYoungGen* _young_gen;
-  CardTableExtension* _card_table;
+  PSCardTable* _card_table;
 
  protected:
   template <class T> void do_oop_work(T* p) {
@@ -112,7 +110,7 @@ class CheckForPreciseMarks : public OopClosure {
   }
 
  public:
-  CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
+  CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) :
     _young_gen(young_gen), _card_table(card_table) { }
 
   virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
@@ -128,12 +126,12 @@ class CheckForPreciseMarks : public OopClosure {
 // when the space is empty, fix the calculation of
 // end_card to allow sp_top == sp->bottom().
 
-void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
+void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
                                              MutableSpace* sp,
                                              HeapWord* space_top,
                                              PSPromotionManager* pm,
                                              uint stripe_number,
                                              uint stripe_total) {
   int ssize = 128; // Naked constant! Work unit = 64k.
   int dirty_card_count = 0;
 
@@ -320,7 +318,7 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
 }
 
 // This should be called before a scavenge.
-void CardTableExtension::verify_all_young_refs_imprecise() {
+void PSCardTable::verify_all_young_refs_imprecise() {
   CheckForUnmarkedObjects check;
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -330,26 +328,21 @@ void PSCardTable::verify_all_young_refs_imprecise() {
 }
 
 // This should be called immediately after a scavenge, before mutators resume.
-void CardTableExtension::verify_all_young_refs_precise() {
+void PSCardTable::verify_all_young_refs_precise() {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSOldGen* old_gen = heap->old_gen();
 
-  CheckForPreciseMarks check(
-    heap->young_gen(),
-    barrier_set_cast<CardTableExtension>(heap->barrier_set()));
+  CheckForPreciseMarks check(heap->young_gen(), this);
 
   old_gen->oop_iterate_no_header(&check);
 
   verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
 }
 
-void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
-  CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
-
-  jbyte* bot = card_table->byte_for(mr.start());
-  jbyte* top = card_table->byte_for(mr.end());
-  while(bot <= top) {
+void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
+  jbyte* bot = byte_for(mr.start());
+  jbyte* top = byte_for(mr.end());
+  while (bot <= top) {
     assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
     if (*bot == verify_card)
       *bot = youngergen_card;
@@ -357,7 +350,7 @@ void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
   }
 }
 
-bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
+bool PSCardTable::addr_is_marked_imprecise(void *addr) {
   jbyte* p = byte_for(addr);
   jbyte val = *p;
 
@@ -376,7 +369,7 @@ bool PSCardTable::addr_is_marked_imprecise(void *addr) {
 }
 
 // Also includes verify_card
-bool CardTableExtension::addr_is_marked_precise(void *addr) {
+bool PSCardTable::addr_is_marked_precise(void *addr) {
   jbyte* p = byte_for(addr);
   jbyte val = *p;
 
@@ -404,8 +397,7 @@ bool PSCardTable::addr_is_marked_precise(void *addr) {
 // The method resize_covered_region_by_end() is analogous to
 // CardTableModRefBS::resize_covered_region() but
 // for regions that grow or shrink at the low end.
-void CardTableExtension::resize_covered_region(MemRegion new_region) {
-
+void PSCardTable::resize_covered_region(MemRegion new_region) {
   for (int i = 0; i < _cur_covered_regions; i++) {
     if (_covered[i].start() == new_region.start()) {
       // Found a covered region with the same start as the
@@ -439,13 +431,13 @@ void PSCardTable::resize_covered_region(MemRegion new_region) {
   resize_covered_region_by_start(new_region);
 }
 
-void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
-  CardTableModRefBS::resize_covered_region(new_region);
+void PSCardTable::resize_covered_region_by_start(MemRegion new_region) {
+  CardTable::resize_covered_region(new_region);
   debug_only(verify_guard();)
 }
 
-void CardTableExtension::resize_covered_region_by_end(int changed_region,
+void PSCardTable::resize_covered_region_by_end(int changed_region,
                                                MemRegion new_region) {
   assert(SafepointSynchronize::is_at_safepoint(),
          "Only expect an expansion at the low end at a GC");
   debug_only(verify_guard();)
@@ -484,8 +476,8 @@ void PSCardTable::resize_covered_region_by_end(int changed_region,
   debug_only(verify_guard();)
 }
 
-bool CardTableExtension::resize_commit_uncommit(int changed_region,
+bool PSCardTable::resize_commit_uncommit(int changed_region,
                                          MemRegion new_region) {
   bool result = false;
   // Commit new or uncommit old pages, if necessary.
   MemRegion cur_committed = _committed[changed_region];
@@ -506,13 +498,12 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
 #ifdef ASSERT
     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
     assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
           "Starts should have proper alignment");
 #endif
 
     jbyte* new_start = byte_for(new_region.start());
     // Round down because this is for the start address
-    HeapWord* new_start_aligned =
-      (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
+    HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
     // The guard page is always committed and should not be committed over.
     // This method is used in cases where the generation is growing toward
     // lower addresses but the guard region is still at the end of the
@@ -579,21 +570,20 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
   return result;
 }
 
-void CardTableExtension::resize_update_committed_table(int changed_region,
+void PSCardTable::resize_update_committed_table(int changed_region,
                                                 MemRegion new_region) {
 
   jbyte* new_start = byte_for(new_region.start());
   // Set the new start of the committed region
-  HeapWord* new_start_aligned =
-    (HeapWord*)align_down(new_start, os::vm_page_size());
+  HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
   MemRegion new_committed = MemRegion(new_start_aligned,
                                       _committed[changed_region].end());
   _committed[changed_region] = new_committed;
   _committed[changed_region].set_start(new_start_aligned);
 }
 
-void CardTableExtension::resize_update_card_table_entries(int changed_region,
+void PSCardTable::resize_update_card_table_entries(int changed_region,
                                                    MemRegion new_region) {
   debug_only(verify_guard();)
   MemRegion original_covered = _covered[changed_region];
   // Initialize the card entries.  Only consider the
@@ -610,8 +600,8 @@ void PSCardTable::resize_update_card_table_entries(int changed_region,
   while (entry < end) { *entry++ = clean_card; }
 }
 
-void CardTableExtension::resize_update_covered_table(int changed_region,
+void PSCardTable::resize_update_covered_table(int changed_region,
                                               MemRegion new_region) {
   // Update the covered region
   _covered[changed_region].set_start(new_region.start());
   _covered[changed_region].set_word_size(new_region.word_size());
@@ -665,7 +655,7 @@ void PSCardTable::resize_update_covered_table(int changed_region,
 //      -------------
 //      ^ returns this
 
-HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
+HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const {
   assert(_cur_covered_regions >= 0, "Expecting at least on region");
   HeapWord* min_start = _committed[ind].start();
   for (int j = 0; j < ind; j++) {
@@ -678,6 +668,6 @@ HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const {
   return min_start;
 }
 
-bool CardTableExtension::is_in_young(oop obj) const {
+bool PSCardTable::is_in_young(oop obj) const {
   return ParallelScavengeHeap::heap()->is_in_young(obj);
 }
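In verify_all_young_refs_precise_helper the old code first fetched the card table back out of the barrier set before walking card bytes; now that the method lives on PSCardTable itself it calls byte_for() directly. The loop shape is a plain inclusive walk from byte_for(mr.start()) to byte_for(mr.end()). Below is a self-contained sketch of that walk over an assumed byte map; the constants, card values, and the CardTableSketch type are illustrative, not the HotSpot declarations.

#include <cstdint>
#include <cstdio>
#include <vector>

static const int    card_shift = 9;            // 512-byte cards (assumed)
static const int8_t clean_card = -1;           // illustrative card values,
static const int8_t dirty_card =  0;           // not the HotSpot constants

struct CardTableSketch {
  uintptr_t           covered_start;           // first covered heap address
  std::vector<int8_t> byte_map;                // one byte per card

  // Address of the card byte covering a heap address, like CardTable::byte_for().
  int8_t* byte_for(uintptr_t addr) {
    return &byte_map[(addr - covered_start) >> card_shift];
  }

  // Inclusive walk over every card byte covering [start, end], mirroring
  //   jbyte* bot = byte_for(mr.start());
  //   jbyte* top = byte_for(mr.end());
  //   while (bot <= top) { ...; bot++; }
  int count_dirty(uintptr_t start, uintptr_t end) {
    int dirty = 0;
    for (int8_t* bot = byte_for(start), *top = byte_for(end); bot <= top; bot++) {
      if (*bot == dirty_card) dirty++;
    }
    return dirty;
  }
};

int main() {
  CardTableSketch ct{0x100000, std::vector<int8_t>(16, clean_card)};
  *ct.byte_for(0x100000 + 3 * 512) = dirty_card;      // dirty one card

  std::printf("dirty cards in range: %d\n",
              ct.count_dirty(0x100000, 0x100000 + 8 * 512));
  return 0;
}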

@@ -22,17 +22,18 @@
  *
  */
 
-#ifndef SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
-#define SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
+#ifndef SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
+#define SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
 
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "oops/oop.hpp"
 
 class MutableSpace;
 class ObjectStartArray;
 class PSPromotionManager;
 class GCTaskQueue;
 
-class CardTableExtension : public CardTableModRefBS {
+class PSCardTable: public CardTable {
  private:
   // Support methods for resizing the card table.
   // resize_commit_uncommit() returns true if the pages were committed or
@@ -43,21 +44,18 @@ class PSCardTable: public CardTable {
   void resize_update_committed_table(int changed_region, MemRegion new_region);
   void resize_update_covered_table(int changed_region, MemRegion new_region);
 
- protected:
-
-  static void verify_all_young_refs_precise_helper(MemRegion mr);
-
- public:
+  void verify_all_young_refs_precise_helper(MemRegion mr);
+
   enum ExtendedCardValue {
-    youngergen_card   = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
-    verify_card       = CardTableModRefBS::CT_MR_BS_last_reserved + 5
+    youngergen_card   = CT_MR_BS_last_reserved + 1,
+    verify_card       = CT_MR_BS_last_reserved + 5
   };
 
-  CardTableExtension(MemRegion whole_heap) :
-    CardTableModRefBS(
-      whole_heap,
-      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
-    { }
+ public:
+  PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
+  static jbyte youngergen_card_val() { return youngergen_card; }
+  static jbyte verify_card_val()     { return verify_card; }
 
   // Scavenge support
   void scavenge_contents_parallel(ObjectStartArray* start_array,
@@ -67,10 +65,6 @@ class PSCardTable: public CardTable {
                                   uint stripe_number,
                                   uint stripe_total);
 
-  // Verification
-  static void verify_all_young_refs_imprecise();
-  static void verify_all_young_refs_precise();
-
   bool addr_is_marked_imprecise(void *addr);
   bool addr_is_marked_precise(void *addr);
 
@@ -88,6 +82,9 @@ class PSCardTable: public CardTable {
     *byte = youngergen_card;
   }
 
+  // ReduceInitialCardMarks support
+  bool is_in_young(oop obj) const;
+
   // Adaptive size policy support
   // Allows adjustment of the base and size of the covered regions
   void resize_covered_region(MemRegion new_region);
@@ -102,29 +99,14 @@ class PSCardTable: public CardTable {
   HeapWord* lowest_prev_committed_start(int ind) const;
 
 #ifdef ASSERT
 
   bool is_valid_card_address(jbyte* addr) {
     return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
   }
 
 #endif // ASSERT
 
-  // ReduceInitialCardMarks support
-  virtual bool is_in_young(oop obj) const;
-
-  virtual bool card_mark_must_follow_store() const {
-    return false;
-  }
+  // Verification
+  void verify_all_young_refs_imprecise();
+  void verify_all_young_refs_precise();
 };
 
-template<>
-struct BarrierSet::GetName<CardTableExtension> {
-  static const BarrierSet::Name value = BarrierSet::CardTableExtension;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableExtension> {
-  typedef ::CardTableExtension type;
-};
-
-#endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
+#endif // SHARE_VM_GC_PARALLEL_PSCARDTABLE

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,12 +236,12 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
                             young_gen->to_space()->is_empty();
     young_gen_empty = eden_empty && survivors_empty;
 
-    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+    PSCardTable* card_table = heap->card_table();
     MemRegion old_mr = heap->old_gen()->reserved();
     if (young_gen_empty) {
-      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
-      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }
 
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
@@ -111,11 +112,8 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
   }
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  BarrierSet* bs = heap->barrier_set();
-
-  bs->resize_covered_region(cmr);
-
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
+  PSCardTable* ct = heap->card_table();
+  ct->resize_covered_region(cmr);
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
@@ -386,7 +384,7 @@ void PSOldGen::post_resize() {
   size_t new_word_size = new_memregion.word_size();
 
   start_array()->set_covered_region(new_memregion);
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
+  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
 
   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,

@@ -1017,12 +1017,12 @@ void PSParallelCompact::post_compact()
   bool young_gen_empty = eden_empty && from_space->is_empty() &&
     to_space->is_empty();
 
-  ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+  PSCardTable* ct = heap->card_table();
   MemRegion old_mr = heap->old_gen()->reserved();
   if (young_gen_empty) {
-    modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+    ct->clear(MemRegion(old_mr.start(), old_mr.end()));
   } else {
-    modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+    ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
   }
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
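In both PSMarkSweep and PSParallelCompact the post-GC card-table maintenance now goes through PSCardTable directly rather than the ModRefBarrierSet interface, but the policy is unchanged: if the young generation ended up empty, the old generation's cards can simply be cleared; otherwise they are invalidated (dirtied) so the next scavenge rescans them. A small sketch of that decision follows, with illustrative card values and type names rather than the HotSpot ones.

#include <cstdint>
#include <cstdio>
#include <vector>

static const int8_t clean_card = -1;   // illustrative values, not HotSpot's
static const int8_t dirty_card =  0;

struct OldGenCardsSketch {
  std::vector<int8_t> cards;

  // clear(): no old->young pointers can exist, so mark every card clean.
  void clear()      { for (int8_t& c : cards) c = clean_card; }
  // invalidate(): old->young pointers may exist anywhere, so mark every
  // card dirty and let the next scavenge find the actual references.
  void invalidate() { for (int8_t& c : cards) c = dirty_card; }
};

int main() {
  OldGenCardsSketch old_gen{std::vector<int8_t>(8, dirty_card)};

  bool young_gen_empty = true;   // outcome of the full collection (example)
  if (young_gen_empty) {
    old_gen.clear();             // mirrors card_table->clear(old_mr)
  } else {
    old_gen.invalidate();        // mirrors card_table->invalidate(old_mr)
  }

  std::printf("first card value after full GC: %d\n", old_gen.cards[0]);
  return 0;
}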
Some files were not shown because too many files have changed in this diff.