8220301: Remove jbyte use in CardTable

Use CardTable::CardValue aliased to uint8_t instead.

Reviewed-by: kbarrett, shade
Author: Thomas Schatzl 2019-03-13 21:01:56 +01:00
parent 4df6db5e3f
commit ece7e8a2a1
50 changed files with 255 additions and 251 deletions
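
The change itself is mechanical: every jbyte (and jubyte) used for card table entries becomes the new CardTable::CardValue alias, a uint8_t, and the per-call-site assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), ...) checks in the barrier-set assemblers are dropped in favour of a single compile-time check in cardTable.hpp. A simplified, self-contained sketch of the resulting shape (not the actual HotSpot declarations; standard static_assert stands in here for HotSpot's STATIC_ASSERT macro):

#include <cstdint>
#include <cstring>

class CardTable {
public:
  typedef uint8_t CardValue;                 // was: jbyte

  // All code generators assume one-byte card entries; checked once here
  // instead of with per-site asserts.
  static_assert(sizeof(CardValue) == 1, "card entries must be one byte");

  CardValue* byte_map_base() const { return _byte_map_base; }
  static CardValue clean_card_val() { return clean_card; }
  static CardValue dirty_card_val() { return dirty_card; }

  // Mark every card in [first, last) dirty, one byte per card.
  void dirty_cards(CardValue* first, CardValue* last) {
    std::memset(first, dirty_card, last - first);
  }

protected:
  enum CardValues {
    clean_card = (CardValue)-1,              // 0xff
    dirty_card = 0
  };

  CardValue* _byte_map;                      // the card marking array (was: jbyte*)
  CardValue* _byte_map_base;                 // adjusted for the heap's low boundary
};

Call sites then traffic in CardValue* rather than jbyte*, as the hunks below show.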


@ -4246,7 +4246,7 @@ operand immByteMapBase()
%{
// Get base of card map
predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
(jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
(CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
match(ConP);
op_cost(0);


@ -193,7 +193,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@ -211,7 +210,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// storing region crossing non-NULL, is card already dirty?
ExternalAddress cardtable((address) ct->byte_map_base());
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
const Register card_addr = tmp;
__ lsr(card_addr, store_addr, CardTable::card_shift);
@ -417,7 +415,6 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,6 @@
#define __ masm->
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
BarrierSet* bs = BarrierSet::barrier_set();
@ -40,7 +39,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
__ lsr(obj, obj, CardTable::card_shift);
@ -68,7 +66,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;


@ -4305,7 +4305,7 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byt
}
void MacroAssembler::load_byte_map_base(Register reg) {
jbyte *byte_map_base =
CardTable::CardValue* byte_map_base =
((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
if (is_valid_AArch64_address((address)byte_map_base)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -216,7 +216,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// storing region crossing non-NULL, is card already dirty?
const Register card_addr = tmp1;
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
__ mov_address(tmp2, (address)ct->byte_map_base());
__ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));


@ -47,7 +47,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_cardtable_loop, L_done;
@ -102,7 +101,6 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
// Load card table base address.
@ -132,7 +130,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);


@ -213,7 +213,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
assert_different_registers(store_addr, new_val, tmp1, tmp2);
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
// Does store cross heap regions?
__ xorr(tmp1, store_addr, new_val);
@ -478,7 +477,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Register tmp = R0;
Register addr = R14;
Register tmp2 = R15;
jbyte* byte_map_base = bs->card_table()->byte_map_base();
CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();
Label restart, refill, ret;


@ -45,7 +45,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
Register count, Register preserve) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, R0);
Label Lskip_loop, Lstore_loop;
@ -73,7 +72,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
}
void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
jbyte* byte_map_base,
CardTable::CardValue* byte_map_base,
Register tmp, Register obj) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();


@ -34,7 +34,7 @@ protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register preserve);
void card_table_write(MacroAssembler* masm, jbyte* byte_map_base, Register tmp, Register obj);
void card_table_write(MacroAssembler* masm, CardTable::CardValue* byte_map_base, Register tmp, Register obj);
void card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp);


@ -269,7 +269,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
Label callRuntime, filtered;
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
BLOCK_COMMENT("g1_write_barrier_post {");
@ -298,7 +297,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
Rnew_val = noreg; // end of lifetime
// Storing region crossing non-NULL, is card already dirty?
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
// Make sure not to use Z_R0 for any of these registers.
Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
@ -542,7 +540,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
jbyte* byte_map_base = ct->byte_map_base();
CardTable::CardValue* byte_map_base = ct->byte_map_base();
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);


@ -47,7 +47,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
bool do_return) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
NearLabel doXC, done;
assert_different_registers(Z_R0, Z_R1, addr, count);
@ -144,7 +143,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register st
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(store_addr, tmp);


@ -275,7 +275,7 @@ static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;
// This gets to assume that o0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
static void generate_dirty_card_log_enqueue(CardTable::CardValue* byte_map_base) {
BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
CodeBuffer buf(bb);
MacroAssembler masm(&buf);
@ -626,7 +626,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
jbyte* byte_map_base = bs->card_table()->byte_map_base();
CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();
Label not_already_dirty, restart, refill, young_card;


@ -44,7 +44,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
Register addr, Register count, Register tmp) {
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
Label L_loop, L_done;
@ -70,7 +69,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
}
void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
jbyte* byte_map_base,
CardTable::CardValue* byte_map_base,
Register tmp, Register obj) {
__ srlx(obj, CardTable::card_shift, obj);
assert(tmp != obj, "need separate temp reg");


@ -26,6 +26,7 @@
#define CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
#include "asm/macroAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
@ -33,7 +34,7 @@ protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
void card_table_write(MacroAssembler* masm, jbyte* byte_map_base, Register tmp, Register obj);
void card_table_write(MacroAssembler* masm, CardTable::CardValue* byte_map_base, Register tmp, Register obj);
void card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register new_val, Register tmp);


@ -273,7 +273,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
CardTableBarrierSet* ct =
barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@ -522,7 +521,6 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
CardTableBarrierSet* ct =
barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label enqueued;


@ -46,7 +46,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
BarrierSet *bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
intptr_t disp = (intptr_t) ct->byte_map_base();
Label L_loop, L_done;
@ -92,7 +91,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
__ shrptr(obj, CardTable::card_shift);


@ -43,10 +43,9 @@ const char* basictype_to_str(BasicType t) {
// ------------------------------------------------------------------
// card_table_base
jbyte *ci_card_table_address() {
CardTable::CardValue* ci_card_table_address() {
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
return ct->byte_map_base();
}


@ -26,6 +26,7 @@
#define SHARE_CI_CIUTILITIES_HPP
#include "ci/ciEnv.hpp"
#include "gc/shared/cardTable.hpp"
#include "utilities/globalDefinitions.hpp"
// The following routines and definitions are used internally in the
@ -50,7 +51,7 @@ inline const char* bool_to_str(bool b) {
const char* basictype_to_str(BasicType t);
jbyte *ci_card_table_address();
CardTable::CardValue* ci_card_table_address();
template <typename T> T ci_card_table_address_as() {
return reinterpret_cast<T>(ci_card_table_address());
}


@ -64,7 +64,7 @@ non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
"n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
CardValue** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
size_t lowest_non_clean_chunk_size;
get_LNC_array_for_space(sp, lowest_non_clean,
@ -106,7 +106,7 @@ process_stride(Space* sp,
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
CardValue** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
// We go from higher to lower addresses here; it wouldn't help that much
@ -114,21 +114,19 @@ process_stride(Space* sp,
// Find the first card address of the first chunk in the stride that is
// at least "bottom" of the used region.
jbyte* start_card = byte_for(used.start());
jbyte* end_card = byte_after(used.last());
CardValue* start_card = byte_for(used.start());
CardValue* end_card = byte_after(used.last());
uintptr_t start_chunk = addr_to_chunk_index(used.start());
uintptr_t start_chunk_stride_num = start_chunk % n_strides;
jbyte* chunk_card_start;
CardValue* chunk_card_start;
if ((uintptr_t)stride >= start_chunk_stride_num) {
chunk_card_start = (jbyte*)(start_card +
(stride - start_chunk_stride_num) *
ParGCCardsPerStrideChunk);
chunk_card_start = (start_card +
(stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk);
} else {
// Go ahead to the next chunk group boundary, then to the requested stride.
chunk_card_start = (jbyte*)(start_card +
(n_strides - start_chunk_stride_num + stride) *
ParGCCardsPerStrideChunk);
chunk_card_start = (start_card +
(n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk);
}
while (chunk_card_start < end_card) {
@ -139,7 +137,7 @@ process_stride(Space* sp,
// by suitably initializing the "min_done" field in process_chunk_boundaries()
// below, together with the dirty region extension accomplished in
// DirtyCardToOopClosure::do_MemRegion().
jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
// Invariant: chunk_mr should be fully contained within the "used" region.
MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
chunk_card_end >= end_card ?
@ -185,7 +183,7 @@ process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
CardValue** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size)
{
@ -224,21 +222,20 @@ process_chunk_boundaries(Space* sp,
// does not scan an object straddling the mutual boundary
// too far to the right, and attempt to scan a portion of
// that object twice.
jbyte* first_dirty_card = NULL;
jbyte* last_card_of_first_obj =
CardValue* first_dirty_card = NULL;
CardValue* last_card_of_first_obj =
byte_for(first_block + sp->block_size(first_block) - 1);
jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
jbyte* last_card_to_check =
(jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
(intptr_t) last_card_of_first_obj);
CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last());
CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj);
// Note that this does not need to go beyond our last card
// if our first object completely straddles this chunk.
for (jbyte* cur = first_card_of_cur_chunk;
for (CardValue* cur = first_card_of_cur_chunk;
cur <= last_card_to_check; cur++) {
jbyte val = *cur;
CardValue val = *cur;
if (card_will_be_scanned(val)) {
first_dirty_card = cur; break;
first_dirty_card = cur;
break;
} else {
assert(!card_may_have_been_dirty(val), "Error");
}
@ -253,7 +250,7 @@ process_chunk_boundaries(Space* sp,
// In this case we can help our neighbor by just asking them
// to stop at our first card (even though it may not be dirty).
assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
}
@ -278,8 +275,8 @@ process_chunk_boundaries(Space* sp,
// last_obj_card is the card corresponding to the start of the last object
// in the chunk. Note that the last object may not start in
// the chunk.
jbyte* const last_obj_card = byte_for(last_block);
const jbyte val = *last_obj_card;
CardValue* const last_obj_card = byte_for(last_block);
const CardValue val = *last_obj_card;
if (!card_will_be_scanned(val)) {
assert(!card_may_have_been_dirty(val), "Error");
// The card containing the head is not dirty. Any marks on
@ -290,20 +287,20 @@ process_chunk_boundaries(Space* sp,
// The last object must be considered dirty, and extends onto the
// following chunk. Look for a dirty card in that chunk that will
// bound our processing.
jbyte* limit_card = NULL;
CardValue* limit_card = NULL;
const size_t last_block_size = sp->block_size(last_block);
jbyte* const last_card_of_last_obj =
CardValue* const last_card_of_last_obj =
byte_for(last_block + last_block_size - 1);
jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end());
// This search potentially goes a long distance looking
// for the next card that will be scanned, terminating
// at the end of the last_block, if no earlier dirty card
// is found.
assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
"last card of next chunk may be wrong");
for (jbyte* cur = first_card_of_next_chunk;
for (CardValue* cur = first_card_of_next_chunk;
cur <= last_card_of_last_obj; cur++) {
const jbyte val = *cur;
const CardValue val = *cur;
if (card_will_be_scanned(val)) {
limit_card = cur; break;
} else {
@ -359,7 +356,7 @@ process_chunk_boundaries(Space* sp,
for (uintptr_t lnc_index = cur_chunk_index + 1;
lnc_index <= last_chunk_index_to_check;
lnc_index++) {
jbyte* lnc_card = lowest_non_clean[lnc_index];
CardValue* lnc_card = lowest_non_clean[lnc_index];
if (lnc_card != NULL) {
// we can stop at the first non-NULL entry we find
if (lnc_card <= limit_card) {
@ -391,7 +388,7 @@ process_chunk_boundaries(Space* sp,
void
CMSCardTable::
get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
CardValue**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
size_t& lowest_non_clean_chunk_size) {


@ -48,7 +48,7 @@ private:
// Ensures that these arrays are of sufficient size, allocating if necessary.
// May be called by several threads concurrently.
void get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
CardValue**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
size_t& lowest_non_clean_chunk_size);
@ -59,7 +59,7 @@ private:
jint stride, int n_strides,
OopsInGenClosure* cl,
CardTableRS* ct,
jbyte** lowest_non_clean,
CardValue** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
@ -70,7 +70,7 @@ private:
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
MemRegion used,
jbyte** lowest_non_clean,
CardValue** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);


@ -91,7 +91,7 @@ void G1BarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_u
}
}
void G1BarrierSet::write_ref_field_post_slow(volatile jbyte* byte) {
void G1BarrierSet::write_ref_field_post_slow(volatile CardValue* byte) {
// In the slow path, we know a card is not young
assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
OrderAccess::storeload();
@ -106,8 +106,8 @@ void G1BarrierSet::invalidate(MemRegion mr) {
if (mr.is_empty()) {
return;
}
volatile jbyte* byte = _card_table->byte_for(mr.start());
jbyte* last_byte = _card_table->byte_for(mr.last());
volatile CardValue* byte = _card_table->byte_for(mr.start());
CardValue* last_byte = _card_table->byte_for(mr.last());
// skip initial young cards
for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
@ -117,7 +117,7 @@ void G1BarrierSet::invalidate(MemRegion mr) {
Thread* thr = Thread::current();
G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thr);
for (; byte <= last_byte; byte++) {
jbyte bv = *byte;
CardValue bv = *byte;
if ((bv != G1CardTable::g1_young_card_val()) &&
(bv != G1CardTable::dirty_card_val())) {
*byte = G1CardTable::dirty_card_val();


@ -27,9 +27,9 @@
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
class CardTable;
class G1CardTable;
// This barrier is specialized to use a logging barrier to support
@ -73,7 +73,7 @@ class G1BarrierSet: public CardTableBarrierSet {
template <DecoratorSet decorators, typename T>
void write_ref_field_post(T* field, oop new_val);
void write_ref_field_post_slow(volatile jbyte* byte);
void write_ref_field_post_slow(volatile CardValue* byte);
virtual void on_thread_create(Thread* thread);
virtual void on_thread_destroy(Thread* thread);


@ -47,7 +47,7 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
template <DecoratorSet decorators, typename T>
inline void G1BarrierSet::write_ref_field_post(T* field, oop new_val) {
volatile jbyte* byte = _card_table->byte_for(field);
volatile CardValue* byte = _card_table->byte_for(field);
if (*byte != G1CardTable::g1_young_card_val()) {
// Take a slow path for cards in old
write_ref_field_post_slow(byte);


@ -81,7 +81,7 @@ void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
}
}
uint G1CardCounts::add_card_count(jbyte* card_ptr) {
uint G1CardCounts::add_card_count(CardValue* card_ptr) {
// Returns the number of times the card has been refined.
// If we failed to reserve/commit the counts table, return 0.
// If card_ptr is beyond the committed end of the counts table,
@ -116,11 +116,11 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
void G1CardCounts::clear_range(MemRegion mr) {
if (has_count_table()) {
const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
const CardValue* from_card_ptr = _ct->byte_for_const(mr.start());
// We use the last address in the range as the range could represent the
// last region in the heap. In which case trying to find the card will be an
// OOB access to the card table.
const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
const CardValue* last_card_ptr = _ct->byte_for_const(mr.last());
#ifdef ASSERT
HeapWord* start_addr = _ct->addr_for(from_card_ptr);


@ -54,19 +54,23 @@ class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
// is 'drained' during the next evacuation pause.
class G1CardCounts: public CHeapObj<mtGC> {
public:
typedef CardTable::CardValue CardValue;
private:
G1CardCountsMappingChangedListener _listener;
G1CollectedHeap* _g1h;
G1CardTable* _ct;
// The table of counts
jubyte* _card_counts;
uint8_t* _card_counts;
// Max capacity of the reserved space for the counts table
size_t _reserved_max_card_num;
// CardTable bottom.
const jbyte* _ct_bot;
const CardValue* _ct_bot;
// Returns true if the card counts table has been reserved.
bool has_reserved_count_table() { return _card_counts != NULL; }
@ -76,22 +80,22 @@ class G1CardCounts: public CHeapObj<mtGC> {
return has_reserved_count_table();
}
size_t ptr_2_card_num(const jbyte* card_ptr) {
size_t ptr_2_card_num(const CardValue* card_ptr) {
assert(card_ptr >= _ct_bot,
"Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
p2i(card_ptr), p2i(_ct_bot));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(CardValue));
assert(card_num < _reserved_max_card_num,
"card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
CardValue* card_num_2_ptr(size_t card_num) {
assert(card_num < _reserved_max_card_num,
"card num out of range: " SIZE_FORMAT, card_num);
return (jbyte*) (_ct_bot + card_num);
return (CardValue*) (_ct_bot + card_num);
}
// Clear the counts table for the given (exclusive) index range.
@ -112,7 +116,7 @@ class G1CardCounts: public CHeapObj<mtGC> {
// Increments the refinement count for the given card.
// Returns the pre-increment count value.
uint add_card_count(jbyte* card_ptr);
uint add_card_count(CardValue* card_ptr);
// Returns true if the given count is high enough to be considered
// 'hot'; false otherwise.


@ -31,19 +31,19 @@
#include "runtime/orderAccess.hpp"
bool G1CardTable::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
CardValue val = _byte_map[card_index];
// It's already processed
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
CardValue new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)deferred_card_val();
new_val = deferred_card_val();
} else {
if (val & claimed_card_val()) {
new_val = val | (jbyte)deferred_card_val();
new_val = val | deferred_card_val();
}
}
if (new_val != val) {
@ -53,8 +53,8 @@ bool G1CardTable::mark_card_deferred(size_t card_index) {
}
void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
CardValue *const first = byte_for(mr.start());
CardValue *const last = byte_after(mr.last());
memset_with_concurrent_readers(first, g1_young_gen, last - first);
}
@ -85,7 +85,7 @@ void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
_cur_covered_regions = 1;
_covered[0] = _whole_heap;
_byte_map = (jbyte*) mapper->reserved().start();
_byte_map = (CardValue*) mapper->reserved().start();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
@ -97,6 +97,6 @@ void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
}
bool G1CardTable::is_in_young(oop obj) const {
volatile jbyte* p = byte_for(obj);
volatile CardValue* p = byte_for(obj);
return *p == G1CardTable::g1_young_card_val();
}


@ -62,7 +62,7 @@ public:
return _byte_map[card_index] == dirty_card_val();
}
static jbyte g1_young_card_val() { return g1_young_gen; }
static CardValue g1_young_card_val() { return g1_young_gen; }
/*
Claimed and deferred bits are used together in G1 during the evacuation
@ -78,7 +78,7 @@ public:
*/
bool is_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
CardValue val = _byte_map[card_index];
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
@ -90,7 +90,7 @@ public:
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
CardValue val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}


@ -114,7 +114,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
G1CollectedHeap* _g1h;
G1CardTable* _g1_ct;
HeapRegion* region_for_card(jbyte* card_ptr) const {
HeapRegion* region_for_card(CardValue* card_ptr) const {
return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
}
@ -128,7 +128,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
_num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
HeapRegion* hr = region_for_card(card_ptr);
// Should only dirty cards in regions that won't be freed.
@ -2726,7 +2726,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
HeapRegionRemSetIterator hrrs(r->rem_set());
size_t card_index;
while (hrrs.has_next(card_index)) {
jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
CardTable::CardValue* card_ptr = ct->byte_for_index(card_index);
// The remembered set might contain references to already freed
// regions. Filter out such entries to avoid failing card table
// verification.


@ -44,7 +44,7 @@
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
public:
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);
if (SuspendibleThreadSet::should_yield()) {
@ -113,7 +113,7 @@ bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
size_t i = node->index();
size_t limit = buffer_size();
for ( ; i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
assert(card_ptr != NULL, "invariant");
if (!cl->do_card_ptr(card_ptr, worker_i)) {
result = false; // Incomplete processing.


@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
#define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
#include "gc/shared/cardTable.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
@ -37,9 +38,11 @@ class Monitor;
// require these closure objects to be stack-allocated.
class G1CardTableEntryClosure: public CHeapObj<mtGC> {
public:
typedef CardTable::CardValue CardValue;
// Process the card whose card table entry is "card_ptr". If returns
// "false", terminate the iteration early.
virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) = 0;
};
// A ptrQueue whose elements are "oops", pointers to object heads.


@ -64,7 +64,7 @@ public:
}
size_t card_index = _ct->index_for(p);
if (_ct->mark_card_deferred(card_index)) {
_dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
_dcq->enqueue(_ct->byte_for_index(card_index));
}
}
};


@ -39,7 +39,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
_use_cache = true;
_hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
_hot_cache = ArrayAllocator<jbyte*>::allocate(_hot_cache_size, mtGC);
_hot_cache = ArrayAllocator<CardValue*>::allocate(_hot_cache_size, mtGC);
reset_hot_cache_internal();
@ -54,12 +54,12 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
G1HotCardCache::~G1HotCardCache() {
if (default_use_cache()) {
assert(_hot_cache != NULL, "Logic");
ArrayAllocator<jbyte*>::free(_hot_cache, _hot_cache_size);
ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
_hot_cache = NULL;
}
}
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
uint count = _card_counts.add_card_count(card_ptr);
if (!_card_counts.is_hot(count)) {
// The card is not hot so do not store it in the cache;
@ -69,7 +69,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
// Otherwise, the card is hot.
size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
size_t masked_index = index & (_hot_cache_size - 1);
jbyte* current_ptr = _hot_cache[masked_index];
CardValue* current_ptr = _hot_cache[masked_index];
// Try to store the new card pointer into the cache. Compare-and-swap to guard
// against the unlikely event of a race resulting in another card pointer to
@ -77,7 +77,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
// card_ptr in favor of the other option, which would be starting over. This
// should be OK since card_ptr will likely be the older card already when/if
// this ever happens.
jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
&_hot_cache[masked_index],
current_ptr);
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
@ -96,7 +96,7 @@ void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_i) {
// The current worker has successfully claimed the chunk [start_idx..end_idx)
end_idx = MIN2(end_idx, _hot_cache_size);
for (size_t i = start_idx; i < end_idx; i++) {
jbyte* card_ptr = _hot_cache[i];
CardValue* card_ptr = _hot_cache[i];
if (card_ptr != NULL) {
bool result = cl->do_card_ptr(card_ptr, worker_i);
assert(result, "Closure should always return true");


@ -53,7 +53,10 @@ class HeapRegion;
// code, increasing throughput.
class G1HotCardCache: public CHeapObj<mtGC> {
public:
typedef CardTable::CardValue CardValue;
private:
G1CollectedHeap* _g1h;
bool _use_cache;
@ -62,7 +65,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
// The card cache table
jbyte** _hot_cache;
CardValue** _hot_cache;
size_t _hot_cache_size;
@ -107,7 +110,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
// adding, NULL is returned and no further action in needed.
// If we evict a card from the cache to make room for the new card,
// the evicted card is then returned for refinement.
jbyte* insert(jbyte* card_ptr);
CardValue* insert(CardValue* card_ptr);
// Refine the cards that have delayed as a result of
// being in the cache.


@ -120,7 +120,7 @@ public:
size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (ct()->mark_card_deferred(card_index)) {
dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
dirty_card_queue().enqueue(ct()->byte_for_index(card_index));
}
}


@ -470,7 +470,7 @@ public:
_g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
{}
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
// The only time we care about recording cards that
// contain references that point into the collection set
// is during RSet updating within an evacuation pause.
@ -538,7 +538,7 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}
inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
@ -550,7 +550,7 @@ inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#endif
}
void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
uint worker_i) {
assert(!_g1h->is_gc_active(), "Only call concurrently");
@ -606,7 +606,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
if (_hot_card_cache->use_cache()) {
assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
const jbyte* orig_card_ptr = card_ptr;
const CardValue* orig_card_ptr = card_ptr;
card_ptr = _hot_card_cache->insert(card_ptr);
if (card_ptr == NULL) {
// There was no eviction. Nothing to do.
@ -647,7 +647,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Okay to clean and process the card now. There are still some
// stale card cases that may be detected by iteration and dealt with
// as iteration failure.
*const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
*const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
// This fence serves two purposes. First, the card must be cleaned
// before processing the contents. Second, we can't proceed with
@ -689,7 +689,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
}
}
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
bool G1RemSet::refine_card_during_gc(CardValue* card_ptr,
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
assert(_g1h->is_gc_active(), "Only call during GC");


@ -76,6 +76,8 @@ private:
G1HotCardCache* _hot_card_cache;
public:
typedef CardTable::CardValue CardValue;
// Gives an approximation on how many threads can be expected to add records to
// a remembered set in parallel. This can be used for sizing data structures to
// decrease performance losses due to data structure sharing.
@ -108,13 +110,13 @@ public:
// Refine the card corresponding to "card_ptr". Safe to be called concurrently
// to the mutator.
void refine_card_concurrently(jbyte* card_ptr,
void refine_card_concurrently(CardValue* card_ptr,
uint worker_i);
// Refine the card corresponding to "card_ptr", applying the given closure to
// all references found. Must only be called during gc.
// Returns whether the card has been scanned.
bool refine_card_during_gc(jbyte* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl);
bool refine_card_during_gc(CardValue* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl);
// Print accumulated summary info from the start of the VM.
void print_summary_info();


@ -140,19 +140,19 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
// It is a waste to get here if empty.
assert(sp->bottom() < sp->top(), "Should not be called if empty");
oop* sp_top = (oop*)space_top;
jbyte* start_card = byte_for(sp->bottom());
jbyte* end_card = byte_for(sp_top - 1) + 1;
CardValue* start_card = byte_for(sp->bottom());
CardValue* end_card = byte_for(sp_top - 1) + 1;
oop* last_scanned = NULL; // Prevent scanning objects more than once
// The width of the stripe ssize*stripe_total must be
// consistent with the number of stripes so that the complete slice
// is covered.
size_t slice_width = ssize * stripe_total;
for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
jbyte* worker_start_card = slice + stripe_number * ssize;
for (CardValue* slice = start_card; slice < end_card; slice += slice_width) {
CardValue* worker_start_card = slice + stripe_number * ssize;
if (worker_start_card >= end_card)
return; // We're done.
jbyte* worker_end_card = worker_start_card + ssize;
CardValue* worker_end_card = worker_start_card + ssize;
if (worker_end_card > end_card)
worker_end_card = end_card;
@ -209,13 +209,13 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
assert(worker_start_card <= end_card, "worker start card beyond end card");
assert(worker_end_card <= end_card, "worker end card beyond end card");
jbyte* current_card = worker_start_card;
CardValue* current_card = worker_start_card;
while (current_card < worker_end_card) {
// Find an unclean card.
while (current_card < worker_end_card && card_is_clean(*current_card)) {
current_card++;
}
jbyte* first_unclean_card = current_card;
CardValue* first_unclean_card = current_card;
// Find the end of a run of contiguous unclean cards
while (current_card < worker_end_card && !card_is_clean(*current_card)) {
@ -232,7 +232,7 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
CardValue* ending_card_of_last_object = byte_for(end_of_last_object);
assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
if (ending_card_of_last_object > current_card) {
// This means the object spans the next complete card.
@ -241,7 +241,7 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
}
}
}
jbyte* following_clean_card = current_card;
CardValue* following_clean_card = current_card;
if (first_unclean_card < worker_end_card) {
oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
@ -342,8 +342,8 @@ void PSCardTable::verify_all_young_refs_precise() {
}
void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
jbyte* bot = byte_for(mr.start());
jbyte* top = byte_for(mr.end());
CardValue* bot = byte_for(mr.start());
CardValue* top = byte_for(mr.end());
while (bot <= top) {
assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
if (*bot == verify_card)
@ -353,8 +353,8 @@ void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
}
bool PSCardTable::addr_is_marked_imprecise(void *addr) {
jbyte* p = byte_for(addr);
jbyte val = *p;
CardValue* p = byte_for(addr);
CardValue val = *p;
if (card_is_dirty(val))
return true;
@ -372,8 +372,8 @@ bool PSCardTable::addr_is_marked_imprecise(void *addr) {
// Also includes verify_card
bool PSCardTable::addr_is_marked_precise(void *addr) {
jbyte* p = byte_for(addr);
jbyte val = *p;
CardValue* p = byte_for(addr);
CardValue val = *p;
if (card_is_newgen(val))
return true;
@ -473,7 +473,7 @@ void PSCardTable::resize_covered_region_by_end(int changed_region,
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));
debug_only(verify_guard();)
}
@ -503,7 +503,7 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
"Starts should have proper alignment");
#endif
jbyte* new_start = byte_for(new_region.start());
CardValue* new_start = byte_for(new_region.start());
// Round down because this is for the start address
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
// The guard page is always committed and should not be committed over.
@ -575,7 +575,7 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
void PSCardTable::resize_update_committed_table(int changed_region,
MemRegion new_region) {
jbyte* new_start = byte_for(new_region.start());
CardValue* new_start = byte_for(new_region.start());
// Set the new start of the committed region
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
MemRegion new_committed = MemRegion(new_start_aligned,
@ -590,13 +590,13 @@ void PSCardTable::resize_update_card_table_entries(int changed_region,
MemRegion original_covered = _covered[changed_region];
// Initialize the card entries. Only consider the
// region covered by the card table (_whole_heap)
jbyte* entry;
CardValue* entry;
if (new_region.start() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start());
} else {
entry = byte_for(new_region.start());
}
jbyte* end = byte_for(original_covered.start());
CardValue* end = byte_for(original_covered.start());
// If _whole_heap starts at the original covered regions start,
// this loop will not execute.
while (entry < end) { *entry++ = clean_card; }


@ -54,8 +54,8 @@ class PSCardTable: public CardTable {
public:
PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
static jbyte youngergen_card_val() { return youngergen_card; }
static jbyte verify_card_val() { return verify_card; }
static CardValue youngergen_card_val() { return youngergen_card; }
static CardValue verify_card_val() { return verify_card; }
// Scavenge support
void scavenge_contents_parallel(ObjectStartArray* start_array,
@ -68,7 +68,7 @@ class PSCardTable: public CardTable {
bool addr_is_marked_imprecise(void *addr);
bool addr_is_marked_precise(void *addr);
void set_card_newgen(void* addr) { jbyte* p = byte_for(addr); *p = verify_card; }
void set_card_newgen(void* addr) { CardValue* p = byte_for(addr); *p = verify_card; }
// Testers for entries
static bool card_is_dirty(int value) { return value == dirty_card; }
@ -78,7 +78,7 @@ class PSCardTable: public CardTable {
// Card marking
void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field);
CardValue* byte = byte_for(field);
*byte = youngergen_card;
}
@ -99,7 +99,7 @@ class PSCardTable: public CardTable {
HeapWord* lowest_prev_committed_start(int ind) const;
#ifdef ASSERT
bool is_valid_card_address(jbyte* addr) {
bool is_valid_card_address(CardValue* addr) {
return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
}
#endif // ASSERT


@ -45,7 +45,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*(ct->byte_map_base())) == sizeof(jbyte), "adjust this code");
LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();


@ -37,7 +37,7 @@
Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
// Get base of card map
jbyte* card_table_base = ci_card_table_address();
CardTable::CardValue* card_table_base = ci_card_table_address();
if (card_table_base != NULL) {
return kit->makecon(TypeRawPtr::make((address)card_table_base));
} else {


@ -109,12 +109,12 @@ void CardTable::initialize() {
// then add it to _byte_map_base, i.e.
//
// _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
_byte_map = (jbyte*) heap_rs.base();
_byte_map = (CardValue*) heap_rs.base();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
jbyte* guard_card = &_byte_map[_guard_index];
CardValue* guard_card = &_byte_map[_guard_index];
HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
_guard_region = MemRegion(guard_page, _page_size);
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
@ -145,7 +145,7 @@ int CardTable::find_covering_region_by_base(HeapWord* base) {
_cur_covered_regions++;
_covered[res].set_start(base);
_covered[res].set_word_size(0);
jbyte* ct_start = byte_for(base);
CardValue* ct_start = byte_for(base);
HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
_committed[res].set_start(ct_start_aligned);
_committed[res].set_word_size(0);
@ -302,7 +302,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
#endif
// The default of 0 is not necessarily clean cards.
jbyte* entry;
CardValue* entry;
if (old_region.last() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start());
} else {
@ -312,8 +312,8 @@ void CardTable::resize_covered_region(MemRegion new_region) {
"The guard card will be overwritten");
// This line commented out cleans the newly expanded region and
// not the aligned up expanded region.
// jbyte* const end = byte_after(new_region.last());
jbyte* const end = (jbyte*) new_end_for_commit;
// CardValue* const end = byte_after(new_region.last());
CardValue* const end = (CardValue*) new_end_for_commit;
assert((end >= byte_after(new_region.last())) || collided || guarded,
"Expect to be beyond new region unless impacting another region");
// do nothing if we resized downward.
@ -330,7 +330,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
}
#endif
if (entry < end) {
memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
}
}
// In any case, the covered size changes.
@ -344,7 +344,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
@ -357,8 +357,8 @@ void CardTable::resize_covered_region(MemRegion new_region) {
void CardTable::dirty_MemRegion(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
CardValue* cur = byte_for(mr.start());
CardValue* last = byte_after(mr.last());
while (cur < last) {
*cur = dirty_card;
cur++;
@ -368,15 +368,15 @@ void CardTable::dirty_MemRegion(MemRegion mr) {
void CardTable::clear_MemRegion(MemRegion mr) {
// Be conservative: only clean cards entirely contained within the
// region.
jbyte* cur;
CardValue* cur;
if (mr.start() == _whole_heap.start()) {
cur = byte_for(mr.start());
} else {
assert(mr.start() > _whole_heap.start(), "mr is not covered.");
cur = byte_after(mr.start() - 1);
}
jbyte* last = byte_after(mr.last());
memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
CardValue* last = byte_after(mr.last());
memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}
void CardTable::clear(MemRegion mr) {
@ -387,8 +387,8 @@ void CardTable::clear(MemRegion mr) {
}
void CardTable::dirty(MemRegion mr) {
jbyte* first = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
CardValue* first = byte_for(mr.start());
CardValue* last = byte_after(mr.last());
memset(first, dirty_card, last-first);
}
@ -398,7 +398,7 @@ void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte *cur_entry, *next_entry, *limit;
CardValue *cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
@ -424,7 +424,7 @@ MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte* cur_entry, *next_entry, *limit;
CardValue* cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry = next_entry) {
@ -474,13 +474,12 @@ void CardTable::verify() {
}
#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr,
jbyte val, bool val_equals) {
jbyte* start = byte_for(mr.start());
jbyte* end = byte_for(mr.last());
void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
CardValue* start = byte_for(mr.start());
CardValue* end = byte_for(mr.last());
bool failures = false;
for (jbyte* curr = start; curr <= end; ++curr) {
jbyte curr_val = *curr;
for (CardValue* curr = start; curr <= end; ++curr) {
CardValue curr_val = *curr;
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) {
if (!failures) {


@ -32,6 +32,14 @@
class CardTable: public CHeapObj<mtGC> {
friend class VMStructs;
public:
typedef uint8_t CardValue;
// All code generators assume that the size of a card table entry is one byte.
// They need to be updated to reflect any change to this.
// This code can typically be found by searching for the byte_map_base() method.
STATIC_ASSERT(sizeof(CardValue) == 1);
protected:
// The declaration order of these const fields is important; see the
// constructor before changing.
@ -43,8 +51,8 @@ protected:
size_t _last_valid_index; // index of the last valid element
const size_t _page_size; // page size used when mapping _byte_map
size_t _byte_map_size; // in bytes
jbyte* _byte_map; // the card marking array
jbyte* _byte_map_base;
CardValue* _byte_map; // the card marking array
CardValue* _byte_map_base;
int _cur_covered_regions;
@ -94,7 +102,7 @@ protected:
static const int _max_covered_regions = 2;
enum CardValues {
clean_card = -1,
clean_card = (CardValue)-1,
// The mask contains zeros in places for all other values.
clean_card_mask = clean_card - 31,
@ -145,17 +153,17 @@ public:
// Return true if "p" is at the start of a card.
bool is_card_aligned(HeapWord* p) {
jbyte* pcard = byte_for(p);
CardValue* pcard = byte_for(p);
return (addr_for(pcard) == p);
}
// Mapping from address to card marking array entry
jbyte* byte_for(const void* p) const {
CardValue* byte_for(const void* p) const {
assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift];
CardValue* result = &_byte_map_base[uintptr_t(p) >> card_shift];
assert(result >= _byte_map && result < _byte_map + _byte_map_size,
"out of bounds accessor for card marking array");
return result;
@ -164,7 +172,7 @@ public:
// The card table byte one after the card marking array
// entry for argument address. Typically used for higher bounds
// for loops iterating through the card table.
jbyte* byte_after(const void* p) const {
CardValue* byte_after(const void* p) const {
return byte_for(p) + 1;
}
@ -173,20 +181,20 @@ public:
void dirty(MemRegion mr);
// Provide read-only access to the card table array.
const jbyte* byte_for_const(const void* p) const {
const CardValue* byte_for_const(const void* p) const {
return byte_for(p);
}
const jbyte* byte_after_const(const void* p) const {
const CardValue* byte_after_const(const void* p) const {
return byte_after(p);
}
// Mapping from card marking array entry to address of first word
HeapWord* addr_for(const jbyte* p) const {
HeapWord* addr_for(const CardValue* p) const {
assert(p >= _byte_map && p < _byte_map + _byte_map_size,
"out of bounds access to card marking array. p: " PTR_FORMAT
" _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte));
size_t delta = pointer_delta(p, _byte_map_base, sizeof(CardValue));
HeapWord* result = (HeapWord*) (delta << card_shift);
assert(_whole_heap.contains(result),
"Returning result = " PTR_FORMAT " out of bounds of "
@ -204,7 +212,7 @@ public:
return byte_for(p) - _byte_map;
}
const jbyte* byte_for_index(const size_t card_index) const {
CardValue* byte_for_index(const size_t card_index) const {
return _byte_map + card_index;
}
@ -233,19 +241,19 @@ public:
card_size_in_words = card_size / sizeof(HeapWord)
};
static jbyte clean_card_val() { return clean_card; }
static jbyte clean_card_mask_val() { return clean_card_mask; }
static jbyte dirty_card_val() { return dirty_card; }
static jbyte claimed_card_val() { return claimed_card; }
static jbyte precleaned_card_val() { return precleaned_card; }
static jbyte deferred_card_val() { return deferred_card; }
static CardValue clean_card_val() { return clean_card; }
static CardValue clean_card_mask_val() { return clean_card_mask; }
static CardValue dirty_card_val() { return dirty_card; }
static CardValue claimed_card_val() { return claimed_card; }
static CardValue precleaned_card_val() { return precleaned_card; }
static CardValue deferred_card_val() { return deferred_card; }
static intptr_t clean_card_row_val() { return clean_card_row; }
// Card marking array base (adjusted for heap low boundary)
// This would be the 0th element of _byte_map, if the heap started at 0x0.
// But since the heap starts at some higher address, this points to somewhere
// before the beginning of the actual _byte_map.
jbyte* byte_map_base() const { return _byte_map_base; }
CardValue* byte_map_base() const { return _byte_map_base; }
bool scanned_concurrently() const { return _scanned_concurrently; }
virtual bool is_in_young(oop obj) const = 0;
@@ -258,7 +266,7 @@ public:
// val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_region(MemRegion mr, CardValue val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
};
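For reference, byte_for(), addr_for() and the biased byte_map_base() above amount to a shift-by-card_shift lookup into a one-byte-per-card array. Below is a minimal standalone sketch of that mapping, assuming the usual 512-byte cards (card_shift == 9) and a plain malloc'd map in place of the VM's reserved _byte_map; all names in it are illustrative.

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Illustrative stand-ins for CardTable::CardValue and the card constants.
typedef uint8_t CardValue;
const int       card_shift = 9;                    // 512-byte cards
const size_t    card_size  = size_t(1) << card_shift;
const CardValue clean_card = (CardValue)-1;        // 0xff, as in CardTable
const CardValue dirty_card = 0;

struct MiniCardTable {
  char*      heap_start;     // low boundary of the covered region
  size_t     heap_bytes;     // size of the covered region
  CardValue* byte_map;       // one byte per card
  CardValue* byte_map_base;  // byte_map biased so that indexing by
                             // (addr >> card_shift) works directly

  MiniCardTable(char* start, size_t bytes) : heap_start(start), heap_bytes(bytes) {
    size_t cards = (bytes + card_size - 1) / card_size + 1;
    byte_map = static_cast<CardValue*>(malloc(cards));
    for (size_t i = 0; i < cards; i++) byte_map[i] = clean_card;
    // Same biasing trick as CardTable: pretend the heap started at address 0.
    byte_map_base = byte_map - (uintptr_t(start) >> card_shift);
  }
  ~MiniCardTable() { free(byte_map); }

  CardValue* byte_for(const void* p) const {
    return &byte_map_base[uintptr_t(p) >> card_shift];
  }
  char* addr_for(const CardValue* entry) const {   // inverse: card -> first address
    size_t delta = size_t(entry - byte_map_base);
    return reinterpret_cast<char*>(delta << card_shift);
  }
};

int main() {
  const size_t heap_bytes = 16 * card_size;
  char* heap = static_cast<char*>(malloc(heap_bytes));
  MiniCardTable ct(heap, heap_bytes);

  char* p = heap + 3 * card_size + 17;       // some address a few cards in
  *ct.byte_for(p) = dirty_card;              // post-barrier style card mark
  assert(ct.addr_for(ct.byte_for(p)) <= p);  // addr_for returns the card start
  assert(*ct.byte_for(heap) == clean_card);  // other cards are untouched

  free(heap);
  return 0;
}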

View file

@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"

View file

@@ -25,11 +25,10 @@
#ifndef SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
#define SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
#include "gc/shared/cardTable.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "utilities/align.hpp"
class CardTable;
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
@@ -45,8 +44,11 @@ class CardTable;
class CardTableBarrierSet: public ModRefBarrierSet {
// Some classes get to look at some private stuff.
friend class VMStructs;
protected:
public:
typedef CardTable::CardValue CardValue;
protected:
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used
bool _defer_initial_card_mark;

View file

@@ -31,7 +31,7 @@
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
volatile jbyte* byte = _card_table->byte_for(field);
volatile CardValue* byte = _card_table->byte_for(field);
if (_card_table->scanned_concurrently()) {
// Perform a releasing store if the card table is scanned concurrently
OrderAccess::release_store(byte, CardTable::dirty_card_val());
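The post-barrier above only ever stores dirty_card_val(); what varies is the memory ordering, which must be releasing when another thread may scan the card table concurrently with the mutator. A small sketch of that choice using std::atomic in place of OrderAccess follows; the flag and card pointer are simplified stand-ins for the VM types.

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue dirty_card = 0;

// Dirty the card covering a just-updated reference field. If the card table
// is scanned concurrently, the store must be a releasing store so the field
// update is visible before the card appears dirty to the scanner.
inline void write_ref_field_post(std::atomic<CardValue>* card, bool scanned_concurrently) {
  if (scanned_concurrently) {
    card->store(dirty_card, std::memory_order_release);
  } else {
    card->store(dirty_card, std::memory_order_relaxed);
  }
}

int main() {
  std::atomic<CardValue> card{(CardValue)-1};   // clean
  write_ref_field_post(&card, true);
  return card.load() == dirty_card ? 0 : 1;
}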

View file

@@ -78,9 +78,8 @@ void CLDRemSet::clear_mod_union() {
ClassLoaderDataGraph::cld_do(&closure);
}
jbyte CardTableRS::find_unused_youngergenP_card_value() {
for (jbyte v = youngergenP1_card;
CardTable::CardValue CardTableRS::find_unused_youngergenP_card_value() {
for (CardValue v = youngergenP1_card;
v < cur_youngergen_and_prev_nonclean_card;
v++) {
bool seen = false;
@@ -122,7 +121,7 @@ void CardTableRS::younger_refs_iterate(Generation* g,
g->younger_refs_iterate(blk, n_threads);
}
inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
if (_is_par) {
return clear_card_parallel(entry);
} else {
@@ -130,16 +129,16 @@ inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
}
}
inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
inline bool ClearNoncleanCardWrapper::clear_card_parallel(CardValue* entry) {
while (true) {
// In the parallel case, we may have to do this several times.
jbyte entry_val = *entry;
CardValue entry_val = *entry;
assert(entry_val != CardTableRS::clean_card_val(),
"We shouldn't be looking at clean cards, and this should "
"be the only place they get cleaned.");
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|| _ct->is_prev_youngergen_card_val(entry_val)) {
jbyte res =
CardValue res =
Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
if (res == entry_val) {
break;
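clear_card_parallel() above is a claim-by-CAS loop: each worker re-reads the card, and only the thread whose compare-and-swap installs clean_card_val() goes on to process the covered range. A simplified sketch of the same pattern with std::atomic, using two card states instead of the full youngergen set:

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue clean_card = (CardValue)-1;

// Returns true if this thread cleaned the card and should process it,
// false if it was already clean (some other worker claimed it first).
bool clear_card_parallel(std::atomic<CardValue>* entry) {
  CardValue v = entry->load(std::memory_order_relaxed);
  while (v != clean_card) {
    // Try to claim the card by swinging its current value to clean.
    if (entry->compare_exchange_weak(v, clean_card,
                                     std::memory_order_acq_rel,
                                     std::memory_order_relaxed)) {
      return true;            // we own the card; caller scans it
    }
    // CAS failed: v now holds the value another thread installed; re-check.
  }
  return false;
}

int main() {
  std::atomic<CardValue> card{0};               // dirty
  return clear_card_parallel(&card) ? 0 : 1;    // first caller claims it
}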
@@ -167,8 +166,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
}
inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
jbyte entry_val = *entry;
inline bool ClearNoncleanCardWrapper::clear_card_serial(CardValue* entry) {
CardValue entry_val = *entry;
assert(entry_val != CardTableRS::clean_card_val(),
"We shouldn't be looking at clean cards, and this should "
"be the only place they get cleaned.");
@@ -183,7 +182,7 @@ ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
_dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
}
bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
bool ClearNoncleanCardWrapper::is_word_aligned(CardTable::CardValue* entry) {
return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
}
@@ -195,8 +194,8 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
assert(mr.word_size() > 0, "Error");
assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
// mr.end() may not necessarily be card aligned.
jbyte* cur_entry = _ct->byte_for(mr.last());
const jbyte* limit = _ct->byte_for(mr.start());
CardValue* cur_entry = _ct->byte_for(mr.last());
const CardValue* limit = _ct->byte_for(mr.start());
HeapWord* end_of_non_clean = mr.end();
HeapWord* start_of_non_clean = end_of_non_clean;
while (cur_entry >= limit) {
@@ -215,7 +214,7 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// fast forward through a potentially contiguous whole-word range of clean cards beginning at a word boundary
if (is_word_aligned(cur_entry)) {
jbyte* cur_row = cur_entry - BytesPerWord;
CardValue* cur_row = cur_entry - BytesPerWord;
while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
cur_row -= BytesPerWord;
}
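The inner loop above fast-forwards over clean cards a whole word at a time: once cur_entry is word aligned, eight 0xff card bytes can be tested with a single intptr_t comparison against clean_card_row_val(). A standalone sketch of that backward scan, assuming 0xff is the clean value; the helper name is illustrative.

#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint8_t CardValue;
const CardValue clean_card     = (CardValue)-1;
const intptr_t  clean_card_row = (intptr_t)-1;   // a whole word of clean cards
const int       BytesPerWord   = (int)sizeof(intptr_t);

// Scan backwards from 'cur' down to 'limit' (both inclusive) and return the
// highest entry that is not clean, or nullptr if the whole range is clean.
// Runs of clean cards are skipped one word at a time.
const CardValue* find_last_non_clean(const CardValue* limit, const CardValue* cur) {
  while (cur >= limit) {
    if (*cur != clean_card) return cur;
    if ((uintptr_t(cur) & (BytesPerWord - 1)) == 0 && cur - limit >= BytesPerWord) {
      const CardValue* row = cur - BytesPerWord;     // word-aligned fast path
      while (row >= limit) {
        intptr_t word;
        memcpy(&word, row, sizeof(word));            // aliasing-safe word load
        if (word != clean_card_row) break;
        row -= BytesPerWord;                         // skip a word of clean cards
      }
      cur = row + (BytesPerWord - 1);  // resume byte-wise inside the word that stopped us
    } else {
      cur--;
    }
  }
  return nullptr;
}

int main() {
  CardValue cards[64];
  memset(cards, 0xff, sizeof(cards));      // all clean
  cards[5] = 0;                            // one dirty card near the start
  const CardValue* hit = find_last_non_clean(cards, cards + 63);
  if (hit != nullptr) printf("non-clean card at index %ld\n", (long)(hit - cards));
  return 0;
}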
@@ -252,9 +251,9 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
volatile jbyte* entry = byte_for(field);
volatile CardValue* entry = byte_for(field);
do {
jbyte entry_val = *entry;
CardValue entry_val = *entry;
// We put this first because it's probably the most common case.
if (entry_val == clean_card_val()) {
// No threat of contention with cleaning threads.
@@ -264,8 +263,8 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
|| is_prev_youngergen_card_val(entry_val)) {
// Mark it as both cur and prev youngergen; card cleaning thread will
// eventually remove the previous stuff.
jbyte new_val = cur_youngergen_and_prev_nonclean_card;
jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
CardValue new_val = cur_youngergen_and_prev_nonclean_card;
CardValue res = Atomic::cmpxchg(new_val, entry, entry_val);
// Did the CAS succeed?
if (res == entry_val) return;
// Otherwise, retry, to see the new value.
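write_ref_field_gc_par() follows the usual pattern for updating a multi-valued card under contention: a clean card can be overwritten directly, anything still carrying dirty or previous-youngergen state is upgraded with a CAS, and a failed CAS retries with the freshly observed value. A simplified sketch with illustrative card values (the real code distinguishes several youngergen states):

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue clean_card                   = (CardValue)-1;
const CardValue cur_youngergen               = 1;   // illustrative values,
const CardValue youngergen_and_prev_nonclean = 2;   // not the VM's encoding

// Record a younger-gen pointer store during a parallel scan. A clean card can
// be overwritten directly (no cleaner will touch it), but a card that still
// carries dirty/previous-youngergen state must be upgraded with a CAS so a
// concurrent cleaning thread cannot lose the new information.
void write_ref_field_gc_par(std::atomic<CardValue>* entry) {
  CardValue v = entry->load(std::memory_order_relaxed);
  while (true) {
    if (v == clean_card) {
      entry->store(cur_youngergen, std::memory_order_relaxed);
      return;
    }
    if (v == cur_youngergen || v == youngergen_and_prev_nonclean) {
      return;                         // already recorded for this iteration
    }
    // Otherwise merge in the new state; on failure v is refreshed and we retry.
    if (entry->compare_exchange_weak(v, youngergen_and_prev_nonclean,
                                     std::memory_order_relaxed)) {
      return;
    }
  }
}

int main() {
  std::atomic<CardValue> card{clean_card};
  write_ref_field_gc_par(&card);
  return card.load() == cur_youngergen ? 0 : 1;
}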
@@ -395,11 +394,11 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
if (s->end() <= gen_boundary) return;
MemRegion used = s->used_region();
jbyte* cur_entry = byte_for(used.start());
jbyte* limit = byte_after(used.last());
CardValue* cur_entry = byte_for(used.start());
CardValue* limit = byte_after(used.last());
while (cur_entry < limit) {
if (*cur_entry == clean_card_val()) {
jbyte* first_dirty = cur_entry+1;
CardValue* first_dirty = cur_entry+1;
while (first_dirty < limit &&
*first_dirty == clean_card_val()) {
first_dirty++;
@@ -614,7 +613,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
// max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
// (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
uint max_gens = 2;
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(CardValue, max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
@@ -626,7 +625,7 @@
CardTableRS::~CardTableRS() {
if (_last_cur_val_in_gen) {
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
FREE_C_HEAP_ARRAY(CardValue, _last_cur_val_in_gen);
_last_cur_val_in_gen = NULL;
}
if (_lowest_non_clean) {
@@ -669,11 +668,11 @@ void CardTableRS::initialize() {
}
}
bool CardTableRS::card_will_be_scanned(jbyte cv) {
bool CardTableRS::card_will_be_scanned(CardValue cv) {
return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
}
bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
bool CardTableRS::card_may_have_been_dirty(CardValue cv) {
return
cv != clean_card &&
(card_is_dirty_wrt_gen_iter(cv) ||

View file

@@ -76,27 +76,27 @@ class CardTableRS: public CardTable {
// used as the current value for a younger_refs_do iteration of that
// portion of the table. The perm gen is index 0. The young gen is index 1,
// but will always have the value "clean_card". The old gen is index 2.
jbyte* _last_cur_val_in_gen;
CardValue* _last_cur_val_in_gen;
jbyte _cur_youngergen_card_val;
CardValue _cur_youngergen_card_val;
// Number of generations, plus one for lingering PermGen issues in CardTableRS.
static const int _regions_to_iterate = 3;
jbyte cur_youngergen_card_val() {
CardValue cur_youngergen_card_val() {
return _cur_youngergen_card_val;
}
void set_cur_youngergen_card_val(jbyte v) {
void set_cur_youngergen_card_val(CardValue v) {
_cur_youngergen_card_val = v;
}
bool is_prev_youngergen_card_val(jbyte v) {
bool is_prev_youngergen_card_val(CardValue v) {
return
youngergen_card <= v &&
v < cur_youngergen_and_prev_nonclean_card &&
v != _cur_youngergen_card_val;
}
// Return a youngergen_card_value that is not currently in use.
jbyte find_unused_youngergenP_card_value();
CardValue find_unused_youngergenP_card_value();
public:
CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
@@ -117,7 +117,7 @@ public:
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field);
CardValue* byte = byte_for(field);
*byte = youngergen_card;
}
void write_ref_field_gc_work(void* field, oop new_val) {
@@ -140,32 +140,32 @@ public:
void invalidate_or_clear(Generation* old_gen);
bool is_prev_nonclean_card_val(jbyte v) {
bool is_prev_nonclean_card_val(CardValue v) {
return
youngergen_card <= v &&
v <= cur_youngergen_and_prev_nonclean_card &&
v != _cur_youngergen_card_val;
}
static bool youngergen_may_have_been_dirty(jbyte cv) {
static bool youngergen_may_have_been_dirty(CardValue cv) {
return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
}
// *** Support for parallel card scanning.
// dirty and precleaned are equivalent wrt younger_refs_iter.
static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
static bool card_is_dirty_wrt_gen_iter(CardValue cv) {
return cv == dirty_card || cv == precleaned_card;
}
// Returns "true" iff the value "cv" will cause the card containing it
// to be scanned in the current traversal. May be overridden by
// subtypes.
bool card_will_be_scanned(jbyte cv);
bool card_will_be_scanned(CardValue cv);
// Returns "true" iff the value "cv" may have represented a dirty card at
// some point.
bool card_may_have_been_dirty(jbyte cv);
bool card_may_have_been_dirty(CardValue cv);
// Iterate over the portion of the card-table which covers the given
// region mr in the given space and apply cl to any dirty sub-regions
@@ -185,7 +185,7 @@ public:
// covered region. Each entry of these arrays is the lowest non-clean
// card of the corresponding chunk containing part of an object from the
// previous chunk, or else NULL.
typedef jbyte* CardPtr;
typedef CardValue* CardPtr;
typedef CardPtr* CardArr;
CardArr* _lowest_non_clean;
size_t* _lowest_non_clean_chunk_size;
@@ -199,15 +199,19 @@ class ClearNoncleanCardWrapper: public MemRegionClosure {
DirtyCardToOopClosure* _dirty_card_closure;
CardTableRS* _ct;
bool _is_par;
public:
typedef CardTable::CardValue CardValue;
private:
// Clears the given card, returns true if the corresponding card should be
// processed.
inline bool clear_card(jbyte* entry);
inline bool clear_card(CardValue* entry);
// Work methods called by the clear_card()
inline bool clear_card_serial(jbyte* entry);
inline bool clear_card_parallel(jbyte* entry);
inline bool clear_card_serial(CardValue* entry);
inline bool clear_card_parallel(CardValue* entry);
// check alignment of pointer
bool is_word_aligned(jbyte* entry);
bool is_word_aligned(CardValue* entry);
public:
ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);

View file

@@ -27,6 +27,7 @@
#include "gc/shared/ageTable.hpp"
#include "gc/shared/cardGeneration.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
@@ -119,12 +120,12 @@
nonstatic_field(CardTable, _last_valid_index, const size_t) \
nonstatic_field(CardTable, _page_size, const size_t) \
nonstatic_field(CardTable, _byte_map_size, const size_t) \
nonstatic_field(CardTable, _byte_map, jbyte*) \
nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \
nonstatic_field(CardTable, _cur_covered_regions, int) \
nonstatic_field(CardTable, _covered, MemRegion*) \
nonstatic_field(CardTable, _committed, MemRegion*) \
nonstatic_field(CardTable, _guard_region, MemRegion) \
nonstatic_field(CardTable, _byte_map_base, jbyte*) \
nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \
nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
\
@@ -217,6 +218,7 @@
/* Miscellaneous other GC types */ \
\
declare_toplevel_type(AgeTable) \
declare_toplevel_type(CardTable::CardValue) \
declare_toplevel_type(Generation::StatRecord) \
declare_toplevel_type(GenerationSpec) \
declare_toplevel_type(HeapWord) \

View file

@@ -93,7 +93,7 @@ class CompilerToVM {
static int _max_oop_map_stack_offset;
static int _fields_annotations_base_offset;
static jbyte* cardtable_start_address;
static CardTable::CardValue* cardtable_start_address;
static int cardtable_shift;
static int vm_page_size;

View file

@@ -24,6 +24,7 @@
// no precompiled headers
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "jvmci/jvmciRuntime.hpp"
@@ -63,7 +64,7 @@ HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
int CompilerToVM::Data::_max_oop_map_stack_offset;
int CompilerToVM::Data::_fields_annotations_base_offset;
jbyte* CompilerToVM::Data::cardtable_start_address;
CardTable::CardValue* CompilerToVM::Data::cardtable_start_address;
int CompilerToVM::Data::cardtable_shift;
int CompilerToVM::Data::vm_page_size;
@@ -126,7 +127,7 @@ void CompilerToVM::Data::initialize(TRAPS) {
BarrierSet* bs = BarrierSet::barrier_set();
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
jbyte* base = ci_card_table_address();
CardTable::CardValue* base = ci_card_table_address();
assert(base != NULL, "unexpected byte_map_base");
cardtable_start_address = base;
cardtable_shift = CardTable::card_shift;
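These two exported values are all the compiler needs to emit the card mark: shift the address of the updated field right by cardtable_shift and store a zero byte at that offset from cardtable_start_address. A sketch of the resulting barrier in plain C++; the setup in main() fakes a small biased table, and the names mirror the fields above but are purely illustrative.

#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uint8_t CardValue;

// Stand-ins for the values CompilerToVM::Data publishes to the compiler.
static CardValue* cardtable_start_address = nullptr;  // byte_map_base()
static int        cardtable_shift         = 9;        // CardTable::card_shift

// The generated barrier boils down to: one shift, one add, one byte store.
inline void emit_card_mark(const void* stored_into) {
  cardtable_start_address[uintptr_t(stored_into) >> cardtable_shift] = 0;  // dirty
}

int main() {
  const size_t card_size = size_t(1) << 9;
  static CardValue table[18];
  memset(table, 0xff, sizeof(table));                        // all clean
  char* heap = new char[16 * card_size];
  cardtable_start_address = table - (uintptr_t(heap) >> 9);  // biased base

  emit_card_mark(heap + 5 * card_size + 40);                 // store into a mid card
  for (int i = 0; i < 18; i++) {
    if (table[i] != 0xff) printf("card %d dirtied\n", i);
  }
  delete[] heap;
  return 0;
}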