8220301: Remove jbyte use in CardTable

Use CardTable::CardValue aliased to uint8_t instead.

Reviewed-by: kbarrett, shade
Thomas Schatzl 2019-03-13 21:01:56 +01:00
parent 4df6db5e3f
commit ece7e8a2a1
50 changed files with 255 additions and 251 deletions
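
The pattern applied across all 50 files is the same: raw jbyte pointers into the card table become CardTable::CardValue pointers, with CardValue aliased to uint8_t. A minimal self-contained sketch of that shape follows; it is not the HotSpot sources — only the CardTable::CardValue alias and the byte_for() idea come from the patch, while the class layout, the constants, dirty_card_for() and the 512-byte card size (shift of 9) are illustrative assumptions.

#include <cstdint>

// Sketch: card table entries get their own type alias instead of reusing
// the JNI jbyte type. Names other than CardTable::CardValue are illustrative.
class CardTable {
public:
  typedef uint8_t CardValue;                   // one byte per card

  static constexpr CardValue dirty_card = 0;
  static constexpr CardValue clean_card = static_cast<CardValue>(-1);

  // Map a heap address to the card entry covering it (a 512-byte card
  // granularity, i.e. a shift of 9, is assumed in this sketch).
  CardValue* byte_for(const void* addr) const {
    return _byte_map_base + (reinterpret_cast<uintptr_t>(addr) >> 9);
  }

private:
  CardValue* _byte_map_base;                   // biased base of the card array
};

// Call sites that previously took jbyte* now take CardTable::CardValue*.
inline void dirty_card_for(CardTable& ct, const void* addr) {
  CardTable::CardValue* card = ct.byte_for(addr);
  *card = CardTable::dirty_card;
}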


@@ -4246,7 +4246,7 @@ operand immByteMapBase()
 %{
   // Get base of card map
   predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
-            (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
+            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
   match(ConP);
   op_cost(0);


@@ -193,7 +193,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
   BarrierSet* bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label done;
   Label runtime;
@@ -211,7 +210,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
   // storing region crossing non-NULL, is card already dirty?
   ExternalAddress cardtable((address) ct->byte_map_base());
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   const Register card_addr = tmp;
   __ lsr(card_addr, store_addr, CardTable::card_shift);
@@ -417,7 +415,6 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   BarrierSet* bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label done;
   Label runtime;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,6 @@
 #define __ masm->
 void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
   BarrierSet* bs = BarrierSet::barrier_set();
@@ -40,7 +39,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   __ lsr(obj, obj, CardTable::card_shift);
@@ -68,7 +66,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
   BarrierSet* bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label L_loop;


@@ -4305,7 +4305,7 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byt
 }
 void MacroAssembler::load_byte_map_base(Register reg) {
-  jbyte *byte_map_base =
+  CardTable::CardValue* byte_map_base =
     ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();
   if (is_valid_AArch64_address((address)byte_map_base)) {


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -216,7 +216,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
   // storing region crossing non-NULL, is card already dirty?
   const Register card_addr = tmp1;
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   __ mov_address(tmp2, (address)ct->byte_map_base());
   __ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));


@@ -47,7 +47,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
   BarrierSet* bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label L_cardtable_loop, L_done;
@@ -102,7 +101,6 @@ void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Regis
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
   // Load card table base address.
@@ -132,7 +130,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
   assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
   Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);


@@ -213,7 +213,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
   assert_different_registers(store_addr, new_val, tmp1, tmp2);
   CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
   // Does store cross heap regions?
   __ xorr(tmp1, store_addr, new_val);
@@ -478,7 +477,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   Register tmp = R0;
   Register addr = R14;
   Register tmp2 = R15;
-  jbyte* byte_map_base = bs->card_table()->byte_map_base();
+  CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();
   Label restart, refill, ret;


@@ -45,7 +45,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
                                                                     Register count, Register preserve) {
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   assert_different_registers(addr, count, R0);
   Label Lskip_loop, Lstore_loop;
@@ -73,7 +72,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 }
 void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
-                                                    jbyte* byte_map_base,
+                                                    CardTable::CardValue* byte_map_base,
                                                     Register tmp, Register obj) {
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
   CardTable* ct = ctbs->card_table();


@@ -34,7 +34,7 @@ protected:
   virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count, Register preserve);
-  void card_table_write(MacroAssembler* masm, jbyte* byte_map_base, Register tmp, Register obj);
+  void card_table_write(MacroAssembler* masm, CardTable::CardValue* byte_map_base, Register tmp, Register obj);
   void card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register tmp);


@@ -269,7 +269,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
   Label callRuntime, filtered;
   CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
   BLOCK_COMMENT("g1_write_barrier_post {");
@@ -298,7 +297,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
   Rnew_val = noreg; // end of lifetime
   // Storing region crossing non-NULL, is card already dirty?
-  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
   assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
   // Make sure not to use Z_R0 for any of these registers.
   Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
@@ -542,7 +540,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  jbyte* byte_map_base = ct->byte_map_base();
+  CardTable::CardValue* byte_map_base = ct->byte_map_base();
   // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
   __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);


@@ -47,7 +47,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
                                                                     bool do_return) {
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   NearLabel doXC, done;
   assert_different_registers(Z_R0, Z_R1, addr, count);
@@ -144,7 +143,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register st
   // register obj is destroyed afterwards.
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   assert_different_registers(store_addr, tmp);


@@ -275,7 +275,7 @@ static address dirty_card_log_enqueue = 0;
 static u_char* dirty_card_log_enqueue_end = 0;
 // This gets to assume that o0 contains the object address.
-static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
+static void generate_dirty_card_log_enqueue(CardTable::CardValue* byte_map_base) {
   BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
   CodeBuffer buf(bb);
   MacroAssembler masm(&buf);
@@ -626,7 +626,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   Register cardtable = G5;
   Register tmp = G1_scratch;
   Register tmp2 = G3_scratch;
-  jbyte* byte_map_base = bs->card_table()->byte_map_base();
+  CardTable::CardValue* byte_map_base = bs->card_table()->byte_map_base();
   Label not_already_dirty, restart, refill, young_card;


@@ -44,7 +44,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
                                                                     Register addr, Register count, Register tmp) {
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   assert_different_registers(addr, count, tmp);
   Label L_loop, L_done;
@@ -70,7 +69,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 }
 void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
-                                                    jbyte* byte_map_base,
+                                                    CardTable::CardValue* byte_map_base,
                                                     Register tmp, Register obj) {
   __ srlx(obj, CardTable::card_shift, obj);
   assert(tmp != obj, "need separate temp reg");


@@ -26,6 +26,7 @@
 #define CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
 #include "asm/macroAssembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/modRefBarrierSetAssembler.hpp"
 class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
@@ -33,7+34,7 @@ protected:
   virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count, Register tmp);
-  void card_table_write(MacroAssembler* masm, jbyte* byte_map_base, Register tmp, Register obj);
+  void card_table_write(MacroAssembler* masm, CardTable::CardValue* byte_map_base, Register tmp, Register obj);
   void card_write_barrier_post(MacroAssembler* masm, Register store_addr, Register new_val, Register tmp);


@@ -273,7 +273,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
   CardTableBarrierSet* ct =
     barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label done;
   Label runtime;
@@ -522,7 +521,6 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   CardTableBarrierSet* ct =
     barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
   Label done;
   Label enqueued;


@@ -46,7 +46,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
   BarrierSet *bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   intptr_t disp = (intptr_t) ct->byte_map_base();
   Label L_loop, L_done;
@@ -92,7 +91,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   __ shrptr(obj, CardTable::card_shift);


@@ -43,10 +43,9 @@ const char* basictype_to_str(BasicType t) {
 // ------------------------------------------------------------------
 // card_table_base
-jbyte *ci_card_table_address() {
+CardTable::CardValue* ci_card_table_address() {
   BarrierSet* bs = BarrierSet::barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
-  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
   return ct->byte_map_base();
 }


@@ -26,6 +26,7 @@
 #define SHARE_CI_CIUTILITIES_HPP
 #include "ci/ciEnv.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "utilities/globalDefinitions.hpp"
 // The following routines and definitions are used internally in the
@@ -50,7 +51,7 @@ inline const char* bool_to_str(bool b) {
 const char* basictype_to_str(BasicType t);
-jbyte *ci_card_table_address();
+CardTable::CardValue* ci_card_table_address();
 template <typename T> T ci_card_table_address_as() {
   return reinterpret_cast<T>(ci_card_table_address());
 }


@@ -64,7 +64,7 @@ non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
          "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
   // Make sure the LNC array is valid for the space.
-  jbyte** lowest_non_clean;
+  CardValue** lowest_non_clean;
   uintptr_t lowest_non_clean_base_chunk_index;
   size_t lowest_non_clean_chunk_size;
   get_LNC_array_for_space(sp, lowest_non_clean,
@@ -106,7 +106,7 @@ process_stride(Space* sp,
                jint stride, int n_strides,
                OopsInGenClosure* cl,
                CardTableRS* ct,
-               jbyte** lowest_non_clean,
+               CardValue** lowest_non_clean,
                uintptr_t lowest_non_clean_base_chunk_index,
                size_t lowest_non_clean_chunk_size) {
   // We go from higher to lower addresses here; it wouldn't help that much
@@ -114,21 +114,19 @@ process_stride(Space* sp,
   // Find the first card address of the first chunk in the stride that is
   // at least "bottom" of the used region.
-  jbyte* start_card = byte_for(used.start());
-  jbyte* end_card = byte_after(used.last());
+  CardValue* start_card = byte_for(used.start());
+  CardValue* end_card = byte_after(used.last());
   uintptr_t start_chunk = addr_to_chunk_index(used.start());
   uintptr_t start_chunk_stride_num = start_chunk % n_strides;
-  jbyte* chunk_card_start;
+  CardValue* chunk_card_start;
   if ((uintptr_t)stride >= start_chunk_stride_num) {
-    chunk_card_start = (jbyte*)(start_card +
-                                (stride - start_chunk_stride_num) *
-                                ParGCCardsPerStrideChunk);
+    chunk_card_start = (start_card +
+                        (stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk);
   } else {
     // Go ahead to the next chunk group boundary, then to the requested stride.
-    chunk_card_start = (jbyte*)(start_card +
-                                (n_strides - start_chunk_stride_num + stride) *
-                                ParGCCardsPerStrideChunk);
+    chunk_card_start = (start_card +
+                        (n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk);
   }
   while (chunk_card_start < end_card) {
@@ -139,7 +137,7 @@ process_stride(Space* sp,
     // by suitably initializing the "min_done" field in process_chunk_boundaries()
     // below, together with the dirty region extension accomplished in
     // DirtyCardToOopClosure::do_MemRegion().
-    jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
+    CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
     // Invariant: chunk_mr should be fully contained within the "used" region.
     MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
                                    chunk_card_end >= end_card ?
@@ -185,7 +183,7 @@ process_chunk_boundaries(Space* sp,
                          DirtyCardToOopClosure* dcto_cl,
                          MemRegion chunk_mr,
                          MemRegion used,
-                         jbyte** lowest_non_clean,
+                         CardValue** lowest_non_clean,
                          uintptr_t lowest_non_clean_base_chunk_index,
                          size_t lowest_non_clean_chunk_size)
 {
@@ -224,21 +222,20 @@ process_chunk_boundaries(Space* sp,
     // does not scan an object straddling the mutual boundary
     // too far to the right, and attempt to scan a portion of
     // that object twice.
-    jbyte* first_dirty_card = NULL;
-    jbyte* last_card_of_first_obj =
+    CardValue* first_dirty_card = NULL;
+    CardValue* last_card_of_first_obj =
         byte_for(first_block + sp->block_size(first_block) - 1);
-    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
-    jbyte* last_card_to_check =
-      (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
-                    (intptr_t) last_card_of_first_obj);
+    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+    CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last());
+    CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj);
     // Note that this does not need to go beyond our last card
     // if our first object completely straddles this chunk.
-    for (jbyte* cur = first_card_of_cur_chunk;
+    for (CardValue* cur = first_card_of_cur_chunk;
          cur <= last_card_to_check; cur++) {
-      jbyte val = *cur;
+      CardValue val = *cur;
       if (card_will_be_scanned(val)) {
-        first_dirty_card = cur; break;
+        first_dirty_card = cur;
+        break;
       } else {
         assert(!card_may_have_been_dirty(val), "Error");
       }
@@ -253,7 +250,7 @@ process_chunk_boundaries(Space* sp,
     // In this case we can help our neighbor by just asking them
     // to stop at our first card (even though it may not be dirty).
     assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
-    jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
     lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
   }
@@ -278,8 +275,8 @@ process_chunk_boundaries(Space* sp,
     // last_obj_card is the card corresponding to the start of the last object
     // in the chunk. Note that the last object may not start in
     // the chunk.
-    jbyte* const last_obj_card = byte_for(last_block);
-    const jbyte val = *last_obj_card;
+    CardValue* const last_obj_card = byte_for(last_block);
+    const CardValue val = *last_obj_card;
     if (!card_will_be_scanned(val)) {
       assert(!card_may_have_been_dirty(val), "Error");
       // The card containing the head is not dirty. Any marks on
@@ -290,20 +287,20 @@ process_chunk_boundaries(Space* sp,
       // The last object must be considered dirty, and extends onto the
       // following chunk. Look for a dirty card in that chunk that will
       // bound our processing.
-      jbyte* limit_card = NULL;
+      CardValue* limit_card = NULL;
       const size_t last_block_size = sp->block_size(last_block);
-      jbyte* const last_card_of_last_obj =
+      CardValue* const last_card_of_last_obj =
         byte_for(last_block + last_block_size - 1);
-      jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
+      CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end());
       // This search potentially goes a long distance looking
       // for the next card that will be scanned, terminating
      // at the end of the last_block, if no earlier dirty card
      // is found.
       assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
              "last card of next chunk may be wrong");
-      for (jbyte* cur = first_card_of_next_chunk;
+      for (CardValue* cur = first_card_of_next_chunk;
            cur <= last_card_of_last_obj; cur++) {
-        const jbyte val = *cur;
+        const CardValue val = *cur;
         if (card_will_be_scanned(val)) {
           limit_card = cur; break;
         } else {
@@ -359,7 +356,7 @@ process_chunk_boundaries(Space* sp,
       for (uintptr_t lnc_index = cur_chunk_index + 1;
            lnc_index <= last_chunk_index_to_check;
            lnc_index++) {
-        jbyte* lnc_card = lowest_non_clean[lnc_index];
+        CardValue* lnc_card = lowest_non_clean[lnc_index];
         if (lnc_card != NULL) {
           // we can stop at the first non-NULL entry we find
           if (lnc_card <= limit_card) {
@@ -391,7 +388,7 @@ process_chunk_boundaries(Space* sp,
 void
 CMSCardTable::
 get_LNC_array_for_space(Space* sp,
-                        jbyte**& lowest_non_clean,
+                        CardValue**& lowest_non_clean,
                         uintptr_t& lowest_non_clean_base_chunk_index,
                         size_t& lowest_non_clean_chunk_size) {


@@ -48,7 +48,7 @@ private:
   // Ensures that these arrays are of sufficient size, allocating if necessary.
   // May be called by several threads concurrently.
   void get_LNC_array_for_space(Space* sp,
-                               jbyte**& lowest_non_clean,
+                               CardValue**& lowest_non_clean,
                                uintptr_t& lowest_non_clean_base_chunk_index,
                                size_t& lowest_non_clean_chunk_size);
@@ -59,7 +59,7 @@ private:
                       jint stride, int n_strides,
                       OopsInGenClosure* cl,
                       CardTableRS* ct,
-                      jbyte** lowest_non_clean,
+                      CardValue** lowest_non_clean,
                       uintptr_t lowest_non_clean_base_chunk_index,
                       size_t lowest_non_clean_chunk_size);
@@ -70,7 +70,7 @@ private:
                                 DirtyCardToOopClosure* dcto_cl,
                                 MemRegion chunk_mr,
                                 MemRegion used,
-                                jbyte** lowest_non_clean,
+                                CardValue** lowest_non_clean,
                                 uintptr_t lowest_non_clean_base_chunk_index,
                                 size_t lowest_non_clean_chunk_size);


@@ -91,7 +91,7 @@ void G1BarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_u
   }
 }
-void G1BarrierSet::write_ref_field_post_slow(volatile jbyte* byte) {
+void G1BarrierSet::write_ref_field_post_slow(volatile CardValue* byte) {
   // In the slow path, we know a card is not young
   assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
   OrderAccess::storeload();
@@ -106,8 +106,8 @@ void G1BarrierSet::invalidate(MemRegion mr) {
   if (mr.is_empty()) {
     return;
   }
-  volatile jbyte* byte = _card_table->byte_for(mr.start());
-  jbyte* last_byte = _card_table->byte_for(mr.last());
+  volatile CardValue* byte = _card_table->byte_for(mr.start());
+  CardValue* last_byte = _card_table->byte_for(mr.last());
   // skip initial young cards
   for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
@@ -117,7 +117,7 @@ void G1BarrierSet::invalidate(MemRegion mr) {
     Thread* thr = Thread::current();
     G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thr);
     for (; byte <= last_byte; byte++) {
-      jbyte bv = *byte;
+      CardValue bv = *byte;
       if ((bv != G1CardTable::g1_young_card_val()) &&
           (bv != G1CardTable::dirty_card_val())) {
         *byte = G1CardTable::dirty_card_val();


@@ -27,9 +27,9 @@
 #include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1SATBMarkQueueSet.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
-class CardTable;
 class G1CardTable;
 // This barrier is specialized to use a logging barrier to support
@@ -73,7 +73,7 @@ class G1BarrierSet: public CardTableBarrierSet {
   template <DecoratorSet decorators, typename T>
   void write_ref_field_post(T* field, oop new_val);
-  void write_ref_field_post_slow(volatile jbyte* byte);
+  void write_ref_field_post_slow(volatile CardValue* byte);
   virtual void on_thread_create(Thread* thread);
   virtual void on_thread_destroy(Thread* thread);


@@ -47,7 +47,7 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_post(T* field, oop new_val) {
-  volatile jbyte* byte = _card_table->byte_for(field);
+  volatile CardValue* byte = _card_table->byte_for(field);
   if (*byte != G1CardTable::g1_young_card_val()) {
     // Take a slow path for cards in old
     write_ref_field_post_slow(byte);


@@ -81,7 +81,7 @@ void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   }
 }
-uint G1CardCounts::add_card_count(jbyte* card_ptr) {
+uint G1CardCounts::add_card_count(CardValue* card_ptr) {
   // Returns the number of times the card has been refined.
   // If we failed to reserve/commit the counts table, return 0.
   // If card_ptr is beyond the committed end of the counts table,
@@ -116,11 +116,11 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
 void G1CardCounts::clear_range(MemRegion mr) {
   if (has_count_table()) {
-    const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
+    const CardValue* from_card_ptr = _ct->byte_for_const(mr.start());
     // We use the last address in the range as the range could represent the
     // last region in the heap. In which case trying to find the card will be an
     // OOB access to the card table.
-    const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
+    const CardValue* last_card_ptr = _ct->byte_for_const(mr.last());
 #ifdef ASSERT
     HeapWord* start_addr = _ct->addr_for(from_card_ptr);


@@ -54,19 +54,23 @@ class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
 // is 'drained' during the next evacuation pause.
 class G1CardCounts: public CHeapObj<mtGC> {
+public:
+  typedef CardTable::CardValue CardValue;
+
+private:
   G1CardCountsMappingChangedListener _listener;
   G1CollectedHeap* _g1h;
   G1CardTable* _ct;
   // The table of counts
-  jubyte* _card_counts;
+  uint8_t* _card_counts;
   // Max capacity of the reserved space for the counts table
   size_t _reserved_max_card_num;
   // CardTable bottom.
-  const jbyte* _ct_bot;
+  const CardValue* _ct_bot;
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }
@@ -76,22 +80,22 @@ class G1CardCounts: public CHeapObj<mtGC> {
     return has_reserved_count_table();
   }
-  size_t ptr_2_card_num(const jbyte* card_ptr) {
+  size_t ptr_2_card_num(const CardValue* card_ptr) {
     assert(card_ptr >= _ct_bot,
            "Invalid card pointer: "
            "card_ptr: " PTR_FORMAT ", "
           "_ct_bot: " PTR_FORMAT,
           p2i(card_ptr), p2i(_ct_bot));
-    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
+    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(CardValue));
     assert(card_num < _reserved_max_card_num,
            "card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
     return card_num;
   }
-  jbyte* card_num_2_ptr(size_t card_num) {
+  CardValue* card_num_2_ptr(size_t card_num) {
     assert(card_num < _reserved_max_card_num,
            "card num out of range: " SIZE_FORMAT, card_num);
-    return (jbyte*) (_ct_bot + card_num);
+    return (CardValue*) (_ct_bot + card_num);
   }
   // Clear the counts table for the given (exclusive) index range.
@@ -112,7 +116,7 @@ class G1CardCounts: public CHeapObj<mtGC> {
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
-  uint add_card_count(jbyte* card_ptr);
+  uint add_card_count(CardValue* card_ptr);
   // Returns true if the given count is high enough to be considered
   // 'hot'; false otherwise.


@@ -31,19 +31,19 @@
 #include "runtime/orderAccess.hpp"
 bool G1CardTable::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
+  CardValue val = _byte_map[card_index];
   // It's already processed
   if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
     return false;
   }
   // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
+  CardValue new_val = val;
   if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
+    new_val = deferred_card_val();
   } else {
     if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
+      new_val = val | deferred_card_val();
     }
   }
   if (new_val != val) {
@@ -53,8 +53,8 @@ bool G1CardTable::mark_card_deferred(size_t card_index) {
 }
 void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
-  jbyte *const first = byte_for(mr.start());
-  jbyte *const last = byte_after(mr.last());
+  CardValue *const first = byte_for(mr.start());
+  CardValue *const last = byte_after(mr.last());
   memset_with_concurrent_readers(first, g1_young_gen, last - first);
 }
@@ -85,7 +85,7 @@ void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
   _cur_covered_regions = 1;
   _covered[0] = _whole_heap;
-  _byte_map = (jbyte*) mapper->reserved().start();
+  _byte_map = (CardValue*) mapper->reserved().start();
   _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
   assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
@@ -97,6 +97,6 @@ void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
 }
 bool G1CardTable::is_in_young(oop obj) const {
-  volatile jbyte* p = byte_for(obj);
+  volatile CardValue* p = byte_for(obj);
   return *p == G1CardTable::g1_young_card_val();
 }


@@ -62,7 +62,7 @@ public:
     return _byte_map[card_index] == dirty_card_val();
   }
-  static jbyte g1_young_card_val() { return g1_young_gen; }
+  static CardValue g1_young_card_val() { return g1_young_gen; }
 /*
    Claimed and deferred bits are used together in G1 during the evacuation
@@ -78,7 +78,7 @@ public:
  */
   bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
+    CardValue val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
   }
@@ -90,7 +90,7 @@ public:
   bool mark_card_deferred(size_t card_index);
   bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
+    CardValue val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }


@@ -114,7 +114,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
   G1CollectedHeap* _g1h;
   G1CardTable* _g1_ct;
-  HeapRegion* region_for_card(jbyte* card_ptr) const {
+  HeapRegion* region_for_card(CardValue* card_ptr) const {
     return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
   }
@@ -128,7 +128,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
     _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+  bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
     HeapRegion* hr = region_for_card(card_ptr);
     // Should only dirty cards in regions that won't be freed.
@@ -2726,7 +2726,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
       HeapRegionRemSetIterator hrrs(r->rem_set());
       size_t card_index;
       while (hrrs.has_next(card_index)) {
-        jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
+        CardTable::CardValue* card_ptr = ct->byte_for_index(card_index);
         // The remembered set might contain references to already freed
         // regions. Filter out such entries to avoid failing card table
         // verification.


@@ -44,7 +44,7 @@
 // SuspendibleThreadSet after every card.
 class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
 public:
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
+  bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
     G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);
     if (SuspendibleThreadSet::should_yield()) {
@@ -113,7 +113,7 @@ bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
   size_t i = node->index();
   size_t limit = buffer_size();
   for ( ; i < limit; ++i) {
-    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
+    CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
     if (!cl->do_card_ptr(card_ptr, worker_i)) {
       result = false; // Incomplete processing.


@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
@@ -37,9 +38,11 @@ class Monitor;
 // require these closure objects to be stack-allocated.
 class G1CardTableEntryClosure: public CHeapObj<mtGC> {
 public:
+  typedef CardTable::CardValue CardValue;
+
   // Process the card whose card table entry is "card_ptr". If returns
   // "false", terminate the iteration early.
-  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
+  virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) = 0;
 };
 // A ptrQueue whose elements are "oops", pointers to object heads.


@@ -64,7 +64,7 @@ public:
     }
     size_t card_index = _ct->index_for(p);
     if (_ct->mark_card_deferred(card_index)) {
-      _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
+      _dcq->enqueue(_ct->byte_for_index(card_index));
     }
   }
 };


@@ -39,7 +39,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
     _use_cache = true;
     _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
-    _hot_cache = ArrayAllocator<jbyte*>::allocate(_hot_cache_size, mtGC);
+    _hot_cache = ArrayAllocator<CardValue*>::allocate(_hot_cache_size, mtGC);
     reset_hot_cache_internal();
@@ -54,12 +54,12 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
 G1HotCardCache::~G1HotCardCache() {
   if (default_use_cache()) {
     assert(_hot_cache != NULL, "Logic");
-    ArrayAllocator<jbyte*>::free(_hot_cache, _hot_cache_size);
+    ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
     _hot_cache = NULL;
   }
 }
-jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
+CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
   uint count = _card_counts.add_card_count(card_ptr);
   if (!_card_counts.is_hot(count)) {
     // The card is not hot so do not store it in the cache;
@@ -69,7 +69,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
   // Otherwise, the card is hot.
   size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
-  jbyte* current_ptr = _hot_cache[masked_index];
+  CardValue* current_ptr = _hot_cache[masked_index];
   // Try to store the new card pointer into the cache. Compare-and-swap to guard
   // against the unlikely event of a race resulting in another card pointer to
@@ -77,7 +77,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
+  CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
                                         &_hot_cache[masked_index],
                                         current_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
@@ -96,7 +96,7 @@ void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_i) {
   // The current worker has successfully claimed the chunk [start_idx..end_idx)
   end_idx = MIN2(end_idx, _hot_cache_size);
   for (size_t i = start_idx; i < end_idx; i++) {
-    jbyte* card_ptr = _hot_cache[i];
+    CardValue* card_ptr = _hot_cache[i];
     if (card_ptr != NULL) {
       bool result = cl->do_card_ptr(card_ptr, worker_i);
       assert(result, "Closure should always return true");


@@ -53,7 +53,10 @@ class HeapRegion;
 // code, increasing throughput.
 class G1HotCardCache: public CHeapObj<mtGC> {
+public:
+  typedef CardTable::CardValue CardValue;
+
+private:
   G1CollectedHeap* _g1h;
   bool _use_cache;
@@ -62,7 +65,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   // The card cache table
-  jbyte** _hot_cache;
+  CardValue** _hot_cache;
   size_t _hot_cache_size;
@@ -107,7 +110,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   // adding, NULL is returned and no further action in needed.
   // If we evict a card from the cache to make room for the new card,
   // the evicted card is then returned for refinement.
-  jbyte* insert(jbyte* card_ptr);
+  CardValue* insert(CardValue* card_ptr);
   // Refine the cards that have delayed as a result of
   // being in the cache.


@@ -120,7 +120,7 @@ public:
       size_t card_index = ct()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
       if (ct()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
+        dirty_card_queue().enqueue(ct()->byte_for_index(card_index));
       }
     }


@ -470,7 +470,7 @@ public:
_g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0) _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
{} {}
bool do_card_ptr(jbyte* card_ptr, uint worker_i) { bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
// The only time we care about recording cards that // The only time we care about recording cards that
// contain references that point into the collection set // contain references that point into the collection set
// is during RSet updating within an evacuation pause. // is during RSet updating within an evacuation pause.
@ -538,7 +538,7 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0); phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
} }
inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) { inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {
#ifdef ASSERT #ifdef ASSERT
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(g1h->is_in_exact(ct->addr_for(card_ptr)), assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
@ -550,7 +550,7 @@ inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#endif #endif
} }
void G1RemSet::refine_card_concurrently(jbyte* card_ptr, void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
uint worker_i) { uint worker_i) {
assert(!_g1h->is_gc_active(), "Only call concurrently"); assert(!_g1h->is_gc_active(), "Only call concurrently");
@ -606,7 +606,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
if (_hot_card_cache->use_cache()) { if (_hot_card_cache->use_cache()) {
assert(!SafepointSynchronize::is_at_safepoint(), "sanity"); assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
const jbyte* orig_card_ptr = card_ptr; const CardValue* orig_card_ptr = card_ptr;
card_ptr = _hot_card_cache->insert(card_ptr); card_ptr = _hot_card_cache->insert(card_ptr);
if (card_ptr == NULL) { if (card_ptr == NULL) {
// There was no eviction. Nothing to do. // There was no eviction. Nothing to do.
@ -647,7 +647,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Okay to clean and process the card now. There are still some // Okay to clean and process the card now. There are still some
// stale card cases that may be detected by iteration and dealt with // stale card cases that may be detected by iteration and dealt with
// as iteration failure. // as iteration failure.
*const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val(); *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
// This fence serves two purposes. First, the card must be cleaned // This fence serves two purposes. First, the card must be cleaned
// before processing the contents. Second, we can't proceed with // before processing the contents. Second, we can't proceed with
@ -689,7 +689,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
} }
} }
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr, bool G1RemSet::refine_card_during_gc(CardValue* card_ptr,
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) { G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
assert(_g1h->is_gc_active(), "Only call during GC"); assert(_g1h->is_gc_active(), "Only call during GC");
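
The clean-then-fence step inside refine_card_concurrently() above is the subtle part of concurrent refinement, so here is a hedged standalone sketch of the same ordering idea. It uses std::atomic_thread_fence in place of HotSpot's OrderAccess, and the refine_one_card / scan_card_region names are made up; the real code additionally filters stale, hot-cached and already-claimed cards.

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue clean_card = (CardValue)-1;   // 0xff, as in G1CardTable
const CardValue dirty_card = 0;

static void scan_card_region(CardValue* /*card_ptr*/) {
  // walk the objects covered by this card; omitted in this sketch
}

// Clean the card *before* scanning, then fence. The full fence keeps the
// clean-card store from being reordered with the subsequent heap reads,
// so if a mutator dirties the card again while we scan, that new dirty
// value survives and the card will be refined again later.
static void refine_one_card(CardValue* card_ptr) {
  if (*card_ptr != dirty_card) {
    return;                                   // stale or already refined
  }
  *const_cast<volatile CardValue*>(card_ptr) = clean_card;
  std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad
  scan_card_region(card_ptr);
}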

@ -76,6 +76,8 @@ private:
G1HotCardCache* _hot_card_cache; G1HotCardCache* _hot_card_cache;
public: public:
typedef CardTable::CardValue CardValue;
// Gives an approximation on how many threads can be expected to add records to // Gives an approximation on how many threads can be expected to add records to
// a remembered set in parallel. This can be used for sizing data structures to // a remembered set in parallel. This can be used for sizing data structures to
// decrease performance losses due to data structure sharing. // decrease performance losses due to data structure sharing.
@ -108,13 +110,13 @@ public:
// Refine the card corresponding to "card_ptr". Safe to be called concurrently // Refine the card corresponding to "card_ptr". Safe to be called concurrently
// to the mutator. // to the mutator.
void refine_card_concurrently(jbyte* card_ptr, void refine_card_concurrently(CardValue* card_ptr,
uint worker_i); uint worker_i);
// Refine the card corresponding to "card_ptr", applying the given closure to // Refine the card corresponding to "card_ptr", applying the given closure to
// all references found. Must only be called during gc. // all references found. Must only be called during gc.
// Returns whether the card has been scanned. // Returns whether the card has been scanned.
bool refine_card_during_gc(jbyte* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl); bool refine_card_during_gc(CardValue* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl);
// Print accumulated summary info from the start of the VM. // Print accumulated summary info from the start of the VM.
void print_summary_info(); void print_summary_info();

@ -140,19 +140,19 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
// It is a waste to get here if empty. // It is a waste to get here if empty.
assert(sp->bottom() < sp->top(), "Should not be called if empty"); assert(sp->bottom() < sp->top(), "Should not be called if empty");
oop* sp_top = (oop*)space_top; oop* sp_top = (oop*)space_top;
jbyte* start_card = byte_for(sp->bottom()); CardValue* start_card = byte_for(sp->bottom());
jbyte* end_card = byte_for(sp_top - 1) + 1; CardValue* end_card = byte_for(sp_top - 1) + 1;
oop* last_scanned = NULL; // Prevent scanning objects more than once oop* last_scanned = NULL; // Prevent scanning objects more than once
// The width of the stripe ssize*stripe_total must be // The width of the stripe ssize*stripe_total must be
// consistent with the number of stripes so that the complete slice // consistent with the number of stripes so that the complete slice
// is covered. // is covered.
size_t slice_width = ssize * stripe_total; size_t slice_width = ssize * stripe_total;
for (jbyte* slice = start_card; slice < end_card; slice += slice_width) { for (CardValue* slice = start_card; slice < end_card; slice += slice_width) {
jbyte* worker_start_card = slice + stripe_number * ssize; CardValue* worker_start_card = slice + stripe_number * ssize;
if (worker_start_card >= end_card) if (worker_start_card >= end_card)
return; // We're done. return; // We're done.
jbyte* worker_end_card = worker_start_card + ssize; CardValue* worker_end_card = worker_start_card + ssize;
if (worker_end_card > end_card) if (worker_end_card > end_card)
worker_end_card = end_card; worker_end_card = end_card;
@ -209,13 +209,13 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
assert(worker_start_card <= end_card, "worker start card beyond end card"); assert(worker_start_card <= end_card, "worker start card beyond end card");
assert(worker_end_card <= end_card, "worker end card beyond end card"); assert(worker_end_card <= end_card, "worker end card beyond end card");
jbyte* current_card = worker_start_card; CardValue* current_card = worker_start_card;
while (current_card < worker_end_card) { while (current_card < worker_end_card) {
// Find an unclean card. // Find an unclean card.
while (current_card < worker_end_card && card_is_clean(*current_card)) { while (current_card < worker_end_card && card_is_clean(*current_card)) {
current_card++; current_card++;
} }
jbyte* first_unclean_card = current_card; CardValue* first_unclean_card = current_card;
// Find the end of a run of contiguous unclean cards // Find the end of a run of contiguous unclean cards
while (current_card < worker_end_card && !card_is_clean(*current_card)) { while (current_card < worker_end_card && !card_is_clean(*current_card)) {
@ -232,7 +232,7 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1); HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
size_t size_of_last_object = oop(last_object_in_dirty_region)->size(); size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object; HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
jbyte* ending_card_of_last_object = byte_for(end_of_last_object); CardValue* ending_card_of_last_object = byte_for(end_of_last_object);
assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card"); assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
if (ending_card_of_last_object > current_card) { if (ending_card_of_last_object > current_card) {
// This means the object spans the next complete card. // This means the object spans the next complete card.
@ -241,7 +241,7 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
} }
} }
} }
jbyte* following_clean_card = current_card; CardValue* following_clean_card = current_card;
if (first_unclean_card < worker_end_card) { if (first_unclean_card < worker_end_card) {
oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card)); oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
@ -342,8 +342,8 @@ void PSCardTable::verify_all_young_refs_precise() {
} }
void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) { void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
jbyte* bot = byte_for(mr.start()); CardValue* bot = byte_for(mr.start());
jbyte* top = byte_for(mr.end()); CardValue* top = byte_for(mr.end());
while (bot <= top) { while (bot <= top) {
assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark"); assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
if (*bot == verify_card) if (*bot == verify_card)
@ -353,8 +353,8 @@ void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
} }
bool PSCardTable::addr_is_marked_imprecise(void *addr) { bool PSCardTable::addr_is_marked_imprecise(void *addr) {
jbyte* p = byte_for(addr); CardValue* p = byte_for(addr);
jbyte val = *p; CardValue val = *p;
if (card_is_dirty(val)) if (card_is_dirty(val))
return true; return true;
@ -372,8 +372,8 @@ bool PSCardTable::addr_is_marked_imprecise(void *addr) {
// Also includes verify_card // Also includes verify_card
bool PSCardTable::addr_is_marked_precise(void *addr) { bool PSCardTable::addr_is_marked_precise(void *addr) {
jbyte* p = byte_for(addr); CardValue* p = byte_for(addr);
jbyte val = *p; CardValue val = *p;
if (card_is_newgen(val)) if (card_is_newgen(val))
return true; return true;
@ -473,7 +473,7 @@ void PSCardTable::resize_covered_region_by_end(int changed_region,
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT, log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last()))); p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT, log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last()))); p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));
debug_only(verify_guard();) debug_only(verify_guard();)
} }
@ -503,7 +503,7 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
"Starts should have proper alignment"); "Starts should have proper alignment");
#endif #endif
jbyte* new_start = byte_for(new_region.start()); CardValue* new_start = byte_for(new_region.start());
// Round down because this is for the start address // Round down because this is for the start address
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
// The guard page is always committed and should not be committed over. // The guard page is always committed and should not be committed over.
@ -575,7 +575,7 @@ bool PSCardTable::resize_commit_uncommit(int changed_region,
void PSCardTable::resize_update_committed_table(int changed_region, void PSCardTable::resize_update_committed_table(int changed_region,
MemRegion new_region) { MemRegion new_region) {
jbyte* new_start = byte_for(new_region.start()); CardValue* new_start = byte_for(new_region.start());
// Set the new start of the committed region // Set the new start of the committed region
HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
MemRegion new_committed = MemRegion(new_start_aligned, MemRegion new_committed = MemRegion(new_start_aligned,
@ -590,13 +590,13 @@ void PSCardTable::resize_update_card_table_entries(int changed_region,
MemRegion original_covered = _covered[changed_region]; MemRegion original_covered = _covered[changed_region];
// Initialize the card entries. Only consider the // Initialize the card entries. Only consider the
// region covered by the card table (_whole_heap) // region covered by the card table (_whole_heap)
jbyte* entry; CardValue* entry;
if (new_region.start() < _whole_heap.start()) { if (new_region.start() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start()); entry = byte_for(_whole_heap.start());
} else { } else {
entry = byte_for(new_region.start()); entry = byte_for(new_region.start());
} }
jbyte* end = byte_for(original_covered.start()); CardValue* end = byte_for(original_covered.start());
// If _whole_heap starts at the original covered region's start, // If _whole_heap starts at the original covered region's start,
// this loop will not execute. // this loop will not execute.
while (entry < end) { *entry++ = clean_card; } while (entry < end) { *entry++ = clean_card; }
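
The dirty-run walk in scavenge_contents_parallel() above (skip clean cards, then extend over the non-clean run) is a generic pattern; the sketch below restates it in isolation. The for_each_unclean_run name and the ProcessRun callback are assumptions for illustration; the real loop also re-snaps run boundaries to object starts via the ObjectStartArray, which is omitted here.

#include <cstdint>

typedef uint8_t CardValue;
const CardValue clean_card = (CardValue)-1;

inline bool card_is_clean(CardValue v) { return v == clean_card; }

// Hand each maximal run of non-clean cards in [start, end) to
// process_run(first, limit), mirroring the current_card loop above.
template <typename ProcessRun>
void for_each_unclean_run(CardValue* start, CardValue* end, ProcessRun process_run) {
  CardValue* current = start;
  while (current < end) {
    while (current < end && card_is_clean(*current)) {
      current++;                              // find an unclean card
    }
    CardValue* first_unclean = current;
    while (current < end && !card_is_clean(*current)) {
      current++;                              // extend the unclean run
    }
    if (first_unclean < end) {
      process_run(first_unclean, current);    // [first_unclean, current)
    }
  }
}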

@ -54,8 +54,8 @@ class PSCardTable: public CardTable {
public: public:
PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {} PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
static jbyte youngergen_card_val() { return youngergen_card; } static CardValue youngergen_card_val() { return youngergen_card; }
static jbyte verify_card_val() { return verify_card; } static CardValue verify_card_val() { return verify_card; }
// Scavenge support // Scavenge support
void scavenge_contents_parallel(ObjectStartArray* start_array, void scavenge_contents_parallel(ObjectStartArray* start_array,
@ -68,7 +68,7 @@ class PSCardTable: public CardTable {
bool addr_is_marked_imprecise(void *addr); bool addr_is_marked_imprecise(void *addr);
bool addr_is_marked_precise(void *addr); bool addr_is_marked_precise(void *addr);
void set_card_newgen(void* addr) { jbyte* p = byte_for(addr); *p = verify_card; } void set_card_newgen(void* addr) { CardValue* p = byte_for(addr); *p = verify_card; }
// Testers for entries // Testers for entries
static bool card_is_dirty(int value) { return value == dirty_card; } static bool card_is_dirty(int value) { return value == dirty_card; }
@ -78,7 +78,7 @@ class PSCardTable: public CardTable {
// Card marking // Card marking
void inline_write_ref_field_gc(void* field, oop new_val) { void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field); CardValue* byte = byte_for(field);
*byte = youngergen_card; *byte = youngergen_card;
} }
@ -99,7 +99,7 @@ class PSCardTable: public CardTable {
HeapWord* lowest_prev_committed_start(int ind) const; HeapWord* lowest_prev_committed_start(int ind) const;
#ifdef ASSERT #ifdef ASSERT
bool is_valid_card_address(jbyte* addr) { bool is_valid_card_address(CardValue* addr) {
return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size); return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
} }
#endif // ASSERT #endif // ASSERT

@ -45,7 +45,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
BarrierSet* bs = BarrierSet::barrier_set(); BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table(); CardTable* ct = ctbs->card_table();
assert(sizeof(*(ct->byte_map_base())) == sizeof(jbyte), "adjust this code");
LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base()); LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
if (addr->is_address()) { if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr(); LIR_Address* address = addr->as_address_ptr();

@ -37,7 +37,7 @@
Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const { Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
// Get base of card map // Get base of card map
jbyte* card_table_base = ci_card_table_address(); CardTable::CardValue* card_table_base = ci_card_table_address();
if (card_table_base != NULL) { if (card_table_base != NULL) {
return kit->makecon(TypeRawPtr::make((address)card_table_base)); return kit->makecon(TypeRawPtr::make((address)card_table_base));
} else { } else {

@ -109,12 +109,12 @@ void CardTable::initialize() {
// then add it to _byte_map_base, i.e. // then add it to _byte_map_base, i.e.
// //
// _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift) // _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
_byte_map = (jbyte*) heap_rs.base(); _byte_map = (CardValue*) heap_rs.base();
_byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
jbyte* guard_card = &_byte_map[_guard_index]; CardValue* guard_card = &_byte_map[_guard_index];
HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size); HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
_guard_region = MemRegion(guard_page, _page_size); _guard_region = MemRegion(guard_page, _page_size);
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
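
The biased _byte_map_base above is easiest to see with concrete numbers. The sketch below is only a worked example with invented values (a 1024-entry map, card_shift of 9 for the usual 512-byte cards, a card-aligned low_bound); it checks that indexing the biased base with p >> card_shift lands on the same entry as indexing _byte_map with the heap offset, which is the whole point of the bias. Like the real code, it deliberately forms an out-of-range base pointer.

#include <cassert>
#include <cstdint>

int main() {
  typedef uint8_t CardValue;
  const int card_shift = 9;                                   // 512-byte cards

  static CardValue byte_map[1024];                            // stands in for heap_rs
  const uintptr_t low_bound = uintptr_t(64) << card_shift;    // card-aligned heap start

  // Same bias as CardTable::initialize():
  //   _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift)
  CardValue* byte_map_base = byte_map - (low_bound >> card_shift);

  // For any heap address p, both expressions name the same card entry.
  uintptr_t p = low_bound + 3 * 512 + 40;
  assert(&byte_map_base[p >> card_shift] ==
         &byte_map[(p - low_bound) >> card_shift]);
  return 0;
}
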
@ -145,7 +145,7 @@ int CardTable::find_covering_region_by_base(HeapWord* base) {
_cur_covered_regions++; _cur_covered_regions++;
_covered[res].set_start(base); _covered[res].set_start(base);
_covered[res].set_word_size(0); _covered[res].set_word_size(0);
jbyte* ct_start = byte_for(base); CardValue* ct_start = byte_for(base);
HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size); HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
_committed[res].set_start(ct_start_aligned); _committed[res].set_start(ct_start_aligned);
_committed[res].set_word_size(0); _committed[res].set_word_size(0);
@ -302,7 +302,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
#endif #endif
// The default of 0 is not necessarily clean cards. // The default of 0 is not necessarily clean cards.
jbyte* entry; CardValue* entry;
if (old_region.last() < _whole_heap.start()) { if (old_region.last() < _whole_heap.start()) {
entry = byte_for(_whole_heap.start()); entry = byte_for(_whole_heap.start());
} else { } else {
@ -312,8 +312,8 @@ void CardTable::resize_covered_region(MemRegion new_region) {
"The guard card will be overwritten"); "The guard card will be overwritten");
// This line commented out cleans the newly expanded region and // This line commented out cleans the newly expanded region and
// not the aligned up expanded region. // not the aligned up expanded region.
// jbyte* const end = byte_after(new_region.last()); // CardValue* const end = byte_after(new_region.last());
jbyte* const end = (jbyte*) new_end_for_commit; CardValue* const end = (CardValue*) new_end_for_commit;
assert((end >= byte_after(new_region.last())) || collided || guarded, assert((end >= byte_after(new_region.last())) || collided || guarded,
"Expect to be beyond new region unless impacting another region"); "Expect to be beyond new region unless impacting another region");
// do nothing if we resized downward. // do nothing if we resized downward.
@ -330,7 +330,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
} }
#endif #endif
if (entry < end) { if (entry < end) {
memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
} }
} }
// In any case, the covered size changes. // In any case, the covered size changes.
@ -344,7 +344,7 @@ void CardTable::resize_covered_region(MemRegion new_region) {
log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT, log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last()))); p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT, log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last()))); p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));
// Touch the last card of the covered region to show that it // Touch the last card of the covered region to show that it
// is committed (or SEGV). // is committed (or SEGV).
@ -357,8 +357,8 @@ void CardTable::resize_covered_region(MemRegion new_region) {
void CardTable::dirty_MemRegion(MemRegion mr) { void CardTable::dirty_MemRegion(MemRegion mr) {
assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start()); CardValue* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last()); CardValue* last = byte_after(mr.last());
while (cur < last) { while (cur < last) {
*cur = dirty_card; *cur = dirty_card;
cur++; cur++;
@ -368,15 +368,15 @@ void CardTable::dirty_MemRegion(MemRegion mr) {
void CardTable::clear_MemRegion(MemRegion mr) { void CardTable::clear_MemRegion(MemRegion mr) {
// Be conservative: only clean cards entirely contained within the // Be conservative: only clean cards entirely contained within the
// region. // region.
jbyte* cur; CardValue* cur;
if (mr.start() == _whole_heap.start()) { if (mr.start() == _whole_heap.start()) {
cur = byte_for(mr.start()); cur = byte_for(mr.start());
} else { } else {
assert(mr.start() > _whole_heap.start(), "mr is not covered."); assert(mr.start() > _whole_heap.start(), "mr is not covered.");
cur = byte_after(mr.start() - 1); cur = byte_after(mr.start() - 1);
} }
jbyte* last = byte_after(mr.last()); CardValue* last = byte_after(mr.last());
memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
} }
void CardTable::clear(MemRegion mr) { void CardTable::clear(MemRegion mr) {
@ -387,8 +387,8 @@ void CardTable::clear(MemRegion mr) {
} }
void CardTable::dirty(MemRegion mr) { void CardTable::dirty(MemRegion mr) {
jbyte* first = byte_for(mr.start()); CardValue* first = byte_for(mr.start());
jbyte* last = byte_after(mr.last()); CardValue* last = byte_after(mr.last());
memset(first, dirty_card, last-first); memset(first, dirty_card, last-first);
} }
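
dirty() and clear_MemRegion() above can use memset over the entry range only because a card entry is exactly one byte, which is what the STATIC_ASSERT added to cardTable.hpp (shown further down) pins. A trivial sketch of the same idiom, with an invented dirty_range helper:

#include <cstring>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue dirty_card = 0;

// Because sizeof(CardValue) == 1, the entry count between two card
// pointers equals the byte count, so memset can mark the whole range,
// exactly as CardTable::dirty() does above.
inline void dirty_range(CardValue* first, CardValue* last /* exclusive */) {
  std::memset(first, dirty_card, static_cast<std::size_t>(last - first));
}
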
@ -398,7 +398,7 @@ void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) { for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]); MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) { if (!mri.is_empty()) {
jbyte *cur_entry, *next_entry, *limit; CardValue *cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit; cur_entry <= limit;
cur_entry = next_entry) { cur_entry = next_entry) {
@ -424,7 +424,7 @@ MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
for (int i = 0; i < _cur_covered_regions; i++) { for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]); MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) { if (!mri.is_empty()) {
jbyte* cur_entry, *next_entry, *limit; CardValue* cur_entry, *next_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit; cur_entry <= limit;
cur_entry = next_entry) { cur_entry = next_entry) {
@ -474,13 +474,12 @@ void CardTable::verify() {
} }
#ifndef PRODUCT #ifndef PRODUCT
void CardTable::verify_region(MemRegion mr, void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
jbyte val, bool val_equals) { CardValue* start = byte_for(mr.start());
jbyte* start = byte_for(mr.start()); CardValue* end = byte_for(mr.last());
jbyte* end = byte_for(mr.last());
bool failures = false; bool failures = false;
for (jbyte* curr = start; curr <= end; ++curr) { for (CardValue* curr = start; curr <= end; ++curr) {
jbyte curr_val = *curr; CardValue curr_val = *curr;
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
if (failed) { if (failed) {
if (!failures) { if (!failures) {

@ -32,6 +32,14 @@
class CardTable: public CHeapObj<mtGC> { class CardTable: public CHeapObj<mtGC> {
friend class VMStructs; friend class VMStructs;
public:
typedef uint8_t CardValue;
// All code generators assume that the size of a card table entry is one byte.
// They need to be updated to reflect any change to this.
// This code can typically be found by searching for the byte_map_base() method.
STATIC_ASSERT(sizeof(CardValue) == 1);
protected: protected:
// The declaration order of these const fields is important; see the // The declaration order of these const fields is important; see the
// constructor before changing. // constructor before changing.
@ -43,8 +51,8 @@ protected:
size_t _last_valid_index; // index of the last valid element size_t _last_valid_index; // index of the last valid element
const size_t _page_size; // page size used when mapping _byte_map const size_t _page_size; // page size used when mapping _byte_map
size_t _byte_map_size; // in bytes size_t _byte_map_size; // in bytes
jbyte* _byte_map; // the card marking array CardValue* _byte_map; // the card marking array
jbyte* _byte_map_base; CardValue* _byte_map_base;
int _cur_covered_regions; int _cur_covered_regions;
@ -94,7 +102,7 @@ protected:
static const int _max_covered_regions = 2; static const int _max_covered_regions = 2;
enum CardValues { enum CardValues {
clean_card = -1, clean_card = (CardValue)-1,
// The mask contains zeros in places for all other values. // The mask contains zeros in places for all other values.
clean_card_mask = clean_card - 31, clean_card_mask = clean_card - 31,
@ -145,17 +153,17 @@ public:
// Return true if "p" is at the start of a card. // Return true if "p" is at the start of a card.
bool is_card_aligned(HeapWord* p) { bool is_card_aligned(HeapWord* p) {
jbyte* pcard = byte_for(p); CardValue* pcard = byte_for(p);
return (addr_for(pcard) == p); return (addr_for(pcard) == p);
} }
// Mapping from address to card marking array entry // Mapping from address to card marking array entry
jbyte* byte_for(const void* p) const { CardValue* byte_for(const void* p) const {
assert(_whole_heap.contains(p), assert(_whole_heap.contains(p),
"Attempt to access p = " PTR_FORMAT " out of bounds of " "Attempt to access p = " PTR_FORMAT " out of bounds of "
" card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")", " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end())); p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift]; CardValue* result = &_byte_map_base[uintptr_t(p) >> card_shift];
assert(result >= _byte_map && result < _byte_map + _byte_map_size, assert(result >= _byte_map && result < _byte_map + _byte_map_size,
"out of bounds accessor for card marking array"); "out of bounds accessor for card marking array");
return result; return result;
@ -164,7 +172,7 @@ public:
// The card table byte one after the card marking array // The card table byte one after the card marking array
// entry for argument address. Typically used for higher bounds // entry for argument address. Typically used for higher bounds
// for loops iterating through the card table. // for loops iterating through the card table.
jbyte* byte_after(const void* p) const { CardValue* byte_after(const void* p) const {
return byte_for(p) + 1; return byte_for(p) + 1;
} }
@ -173,20 +181,20 @@ public:
void dirty(MemRegion mr); void dirty(MemRegion mr);
// Provide read-only access to the card table array. // Provide read-only access to the card table array.
const jbyte* byte_for_const(const void* p) const { const CardValue* byte_for_const(const void* p) const {
return byte_for(p); return byte_for(p);
} }
const jbyte* byte_after_const(const void* p) const { const CardValue* byte_after_const(const void* p) const {
return byte_after(p); return byte_after(p);
} }
// Mapping from card marking array entry to address of first word // Mapping from card marking array entry to address of first word
HeapWord* addr_for(const jbyte* p) const { HeapWord* addr_for(const CardValue* p) const {
assert(p >= _byte_map && p < _byte_map + _byte_map_size, assert(p >= _byte_map && p < _byte_map + _byte_map_size,
"out of bounds access to card marking array. p: " PTR_FORMAT "out of bounds access to card marking array. p: " PTR_FORMAT
" _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT, " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size)); p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte)); size_t delta = pointer_delta(p, _byte_map_base, sizeof(CardValue));
HeapWord* result = (HeapWord*) (delta << card_shift); HeapWord* result = (HeapWord*) (delta << card_shift);
assert(_whole_heap.contains(result), assert(_whole_heap.contains(result),
"Returning result = " PTR_FORMAT " out of bounds of " "Returning result = " PTR_FORMAT " out of bounds of "
@ -204,7 +212,7 @@ public:
return byte_for(p) - _byte_map; return byte_for(p) - _byte_map;
} }
const jbyte* byte_for_index(const size_t card_index) const { CardValue* byte_for_index(const size_t card_index) const {
return _byte_map + card_index; return _byte_map + card_index;
} }
@ -233,19 +241,19 @@ public:
card_size_in_words = card_size / sizeof(HeapWord) card_size_in_words = card_size / sizeof(HeapWord)
}; };
static jbyte clean_card_val() { return clean_card; } static CardValue clean_card_val() { return clean_card; }
static jbyte clean_card_mask_val() { return clean_card_mask; } static CardValue clean_card_mask_val() { return clean_card_mask; }
static jbyte dirty_card_val() { return dirty_card; } static CardValue dirty_card_val() { return dirty_card; }
static jbyte claimed_card_val() { return claimed_card; } static CardValue claimed_card_val() { return claimed_card; }
static jbyte precleaned_card_val() { return precleaned_card; } static CardValue precleaned_card_val() { return precleaned_card; }
static jbyte deferred_card_val() { return deferred_card; } static CardValue deferred_card_val() { return deferred_card; }
static intptr_t clean_card_row_val() { return clean_card_row; } static intptr_t clean_card_row_val() { return clean_card_row; }
// Card marking array base (adjusted for heap low boundary) // Card marking array base (adjusted for heap low boundary)
// This would be the 0th element of _byte_map, if the heap started at 0x0. // This would be the 0th element of _byte_map, if the heap started at 0x0.
// But since the heap starts at some higher address, this points to somewhere // But since the heap starts at some higher address, this points to somewhere
// before the beginning of the actual _byte_map. // before the beginning of the actual _byte_map.
jbyte* byte_map_base() const { return _byte_map_base; } CardValue* byte_map_base() const { return _byte_map_base; }
bool scanned_concurrently() const { return _scanned_concurrently; } bool scanned_concurrently() const { return _scanned_concurrently; }
virtual bool is_in_young(oop obj) const = 0; virtual bool is_in_young(oop obj) const = 0;
@ -258,7 +266,7 @@ public:
// val_equals -> it will check that all cards covered by mr equal val // val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val // !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN; void verify_region(MemRegion mr, CardValue val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
}; };
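
Since CardValue is now unsigned, the constants in the CardValues enum above keep their old bit patterns but change signedness: (CardValue)-1 is 0xff, and clean_card - 31 is 0xe0, the same bytes the jbyte encoding produced. The throwaway check below just spells that out; the hexadecimal values are arithmetic, not taken from the header.

#include <cassert>
#include <cstdint>

int main() {
  typedef uint8_t CardValue;

  // Same definitions as the CardValues enum above.
  const CardValue clean_card      = (CardValue)-1;   // was jbyte -1
  const CardValue clean_card_mask = clean_card - 31;

  // Identical bit patterns to the old signed encoding; only how they
  // read back as integers changes (0xff is now 255 instead of -1).
  assert(clean_card == 0xff);
  assert(clean_card_mask == 0xe0);
  assert(sizeof(CardValue) == 1);   // what the new STATIC_ASSERT enforces
  return 0;
}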

@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSetAssembler.hpp" #include "gc/shared/cardTableBarrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp" #include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectedHeap.hpp"

@ -25,11 +25,10 @@
#ifndef SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP #ifndef SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
#define SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP #define SHARE_GC_SHARED_CARDTABLEBARRIERSET_HPP
#include "gc/shared/cardTable.hpp"
#include "gc/shared/modRefBarrierSet.hpp" #include "gc/shared/modRefBarrierSet.hpp"
#include "utilities/align.hpp" #include "utilities/align.hpp"
class CardTable;
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last // enumerate ref fields that have been modified (since the last
// enumeration.) // enumeration.)
@ -45,8 +44,11 @@ class CardTable;
class CardTableBarrierSet: public ModRefBarrierSet { class CardTableBarrierSet: public ModRefBarrierSet {
// Some classes get to look at some private stuff. // Some classes get to look at some private stuff.
friend class VMStructs; friend class VMStructs;
protected:
public:
typedef CardTable::CardValue CardValue;
protected:
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used // or INCLUDE_JVMCI is being used
bool _defer_initial_card_mark; bool _defer_initial_card_mark;

@ -31,7 +31,7 @@
template <DecoratorSet decorators, typename T> template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) { inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
volatile jbyte* byte = _card_table->byte_for(field); volatile CardValue* byte = _card_table->byte_for(field);
if (_card_table->scanned_concurrently()) { if (_card_table->scanned_concurrently()) {
// Perform a releasing store if the card table is scanned concurrently // Perform a releasing store if the card table is scanned concurrently
OrderAccess::release_store(byte, CardTable::dirty_card_val()); OrderAccess::release_store(byte, CardTable::dirty_card_val());
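
For orientation, the post-write barrier above reduces to "compute the card covering the updated field and store dirty into it, with release ordering when the table is scanned concurrently". The sketch below restates that with std::atomic standing in for the volatile byte plus OrderAccess::release_store; byte_map_base, card_shift and post_write_barrier are stand-in names, and the non-concurrent case simply uses a relaxed store.

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue dirty_card = 0;
const int card_shift = 9;

// Stand-in card table state; must be set up elsewhere before use.
static std::atomic<CardValue>* byte_map_base;

static std::atomic<CardValue>* byte_for(const void* field) {
  return &byte_map_base[uintptr_t(field) >> card_shift];
}

// Post-write barrier sketch: after a reference store into 'field', dirty
// the covering card. Release ordering mirrors the scanned_concurrently()
// branch of write_ref_field_post(); otherwise a plain store suffices.
static void post_write_barrier(void* field, bool scanned_concurrently) {
  std::atomic<CardValue>* byte = byte_for(field);
  byte->store(dirty_card,
              scanned_concurrently ? std::memory_order_release
                                   : std::memory_order_relaxed);
}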

@ -78,9 +78,8 @@ void CLDRemSet::clear_mod_union() {
ClassLoaderDataGraph::cld_do(&closure); ClassLoaderDataGraph::cld_do(&closure);
} }
CardTable::CardValue CardTableRS::find_unused_youngergenP_card_value() {
jbyte CardTableRS::find_unused_youngergenP_card_value() { for (CardValue v = youngergenP1_card;
for (jbyte v = youngergenP1_card;
v < cur_youngergen_and_prev_nonclean_card; v < cur_youngergen_and_prev_nonclean_card;
v++) { v++) {
bool seen = false; bool seen = false;
@ -122,7 +121,7 @@ void CardTableRS::younger_refs_iterate(Generation* g,
g->younger_refs_iterate(blk, n_threads); g->younger_refs_iterate(blk, n_threads);
} }
inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) { inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
if (_is_par) { if (_is_par) {
return clear_card_parallel(entry); return clear_card_parallel(entry);
} else { } else {
@ -130,16 +129,16 @@ inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
} }
} }
inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) { inline bool ClearNoncleanCardWrapper::clear_card_parallel(CardValue* entry) {
while (true) { while (true) {
// In the parallel case, we may have to do this several times. // In the parallel case, we may have to do this several times.
jbyte entry_val = *entry; CardValue entry_val = *entry;
assert(entry_val != CardTableRS::clean_card_val(), assert(entry_val != CardTableRS::clean_card_val(),
"We shouldn't be looking at clean cards, and this should " "We shouldn't be looking at clean cards, and this should "
"be the only place they get cleaned."); "be the only place they get cleaned.");
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val) if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|| _ct->is_prev_youngergen_card_val(entry_val)) { || _ct->is_prev_youngergen_card_val(entry_val)) {
jbyte res = CardValue res =
Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val); Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
if (res == entry_val) { if (res == entry_val) {
break; break;
@ -167,8 +166,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
} }
inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) { inline bool ClearNoncleanCardWrapper::clear_card_serial(CardValue* entry) {
jbyte entry_val = *entry; CardValue entry_val = *entry;
assert(entry_val != CardTableRS::clean_card_val(), assert(entry_val != CardTableRS::clean_card_val(),
"We shouldn't be looking at clean cards, and this should " "We shouldn't be looking at clean cards, and this should "
"be the only place they get cleaned."); "be the only place they get cleaned.");
@ -183,7 +182,7 @@ ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
_dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) { _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
} }
bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) { bool ClearNoncleanCardWrapper::is_word_aligned(CardTable::CardValue* entry) {
return (((intptr_t)entry) & (BytesPerWord-1)) == 0; return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
} }
@ -195,8 +194,8 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
assert(mr.word_size() > 0, "Error"); assert(mr.word_size() > 0, "Error");
assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned"); assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
// mr.end() may not necessarily be card aligned. // mr.end() may not necessarily be card aligned.
jbyte* cur_entry = _ct->byte_for(mr.last()); CardValue* cur_entry = _ct->byte_for(mr.last());
const jbyte* limit = _ct->byte_for(mr.start()); const CardValue* limit = _ct->byte_for(mr.start());
HeapWord* end_of_non_clean = mr.end(); HeapWord* end_of_non_clean = mr.end();
HeapWord* start_of_non_clean = end_of_non_clean; HeapWord* start_of_non_clean = end_of_non_clean;
while (cur_entry >= limit) { while (cur_entry >= limit) {
@ -215,7 +214,7 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
if (is_word_aligned(cur_entry)) { if (is_word_aligned(cur_entry)) {
jbyte* cur_row = cur_entry - BytesPerWord; CardValue* cur_row = cur_entry - BytesPerWord;
while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) { while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
cur_row -= BytesPerWord; cur_row -= BytesPerWord;
} }
@ -252,9 +251,9 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
// cur-younger-gen ==> cur_younger_gen // cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change. // cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) { void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
volatile jbyte* entry = byte_for(field); volatile CardValue* entry = byte_for(field);
do { do {
jbyte entry_val = *entry; CardValue entry_val = *entry;
// We put this first because it's probably the most common case. // We put this first because it's probably the most common case.
if (entry_val == clean_card_val()) { if (entry_val == clean_card_val()) {
// No threat of contention with cleaning threads. // No threat of contention with cleaning threads.
@ -264,8 +263,8 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
|| is_prev_youngergen_card_val(entry_val)) { || is_prev_youngergen_card_val(entry_val)) {
// Mark it as both cur and prev youngergen; card cleaning thread will // Mark it as both cur and prev youngergen; card cleaning thread will
// eventually remove the previous stuff. // eventually remove the previous stuff.
jbyte new_val = cur_youngergen_and_prev_nonclean_card; CardValue new_val = cur_youngergen_and_prev_nonclean_card;
jbyte res = Atomic::cmpxchg(new_val, entry, entry_val); CardValue res = Atomic::cmpxchg(new_val, entry, entry_val);
// Did the CAS succeed? // Did the CAS succeed?
if (res == entry_val) return; if (res == entry_val) return;
// Otherwise, retry, to see the new value. // Otherwise, retry, to see the new value.
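
The loop above is a standard compare-and-swap retry pattern; a condensed standalone version follows. It uses std::atomic and collapses the card encoding to a single combined_card value, since the full younger-gen state machine (clean, previous younger-gen, current younger-gen) is beyond the scope of a sketch.

#include <atomic>
#include <cstdint>

typedef uint8_t CardValue;
const CardValue combined_card = 0x20;   // stands in for cur_youngergen_and_prev_nonclean_card

// Sketch of the write_ref_field_gc_par() retry loop: keep re-reading the
// entry and retrying the CAS until either our value is installed or some
// other thread has already stored a value we can live with.
static void mark_card_par(std::atomic<CardValue>* entry, CardValue my_val) {
  CardValue cur = entry->load();
  while (true) {
    if (cur == my_val || cur == combined_card) {
      return;                       // another thread already did the work
    }
    // On failure compare_exchange_weak refreshes 'cur' with the new value,
    // which is exactly the "retry, to see the new value" step above.
    if (entry->compare_exchange_weak(cur, my_val)) {
      return;
    }
  }
}
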
@ -395,11 +394,11 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
if (s->end() <= gen_boundary) return; if (s->end() <= gen_boundary) return;
MemRegion used = s->used_region(); MemRegion used = s->used_region();
jbyte* cur_entry = byte_for(used.start()); CardValue* cur_entry = byte_for(used.start());
jbyte* limit = byte_after(used.last()); CardValue* limit = byte_after(used.last());
while (cur_entry < limit) { while (cur_entry < limit) {
if (*cur_entry == clean_card_val()) { if (*cur_entry == clean_card_val()) {
jbyte* first_dirty = cur_entry+1; CardValue* first_dirty = cur_entry+1;
while (first_dirty < limit && while (first_dirty < limit &&
*first_dirty == clean_card_val()) { *first_dirty == clean_card_val()) {
first_dirty++; first_dirty++;
@ -614,7 +613,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
// max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
// (which is always 2, young & old), but GenCollectedHeap has not been initialized yet. // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
uint max_gens = 2; uint max_gens = 2;
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(CardValue, max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) { if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
@ -626,7 +625,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
CardTableRS::~CardTableRS() { CardTableRS::~CardTableRS() {
if (_last_cur_val_in_gen) { if (_last_cur_val_in_gen) {
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen); FREE_C_HEAP_ARRAY(CardValue, _last_cur_val_in_gen);
_last_cur_val_in_gen = NULL; _last_cur_val_in_gen = NULL;
} }
if (_lowest_non_clean) { if (_lowest_non_clean) {
@ -669,11 +668,11 @@ void CardTableRS::initialize() {
} }
} }
bool CardTableRS::card_will_be_scanned(jbyte cv) { bool CardTableRS::card_will_be_scanned(CardValue cv) {
return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv); return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
} }
bool CardTableRS::card_may_have_been_dirty(jbyte cv) { bool CardTableRS::card_may_have_been_dirty(CardValue cv) {
return return
cv != clean_card && cv != clean_card &&
(card_is_dirty_wrt_gen_iter(cv) || (card_is_dirty_wrt_gen_iter(cv) ||

@ -76,27 +76,27 @@ class CardTableRS: public CardTable {
// used as the current value for a younger_refs_do iteration of that // used as the current value for a younger_refs_do iteration of that
// portion of the table. The perm gen is index 0. The young gen is index 1, // portion of the table. The perm gen is index 0. The young gen is index 1,
// but will always have the value "clean_card". The old gen is index 2. // but will always have the value "clean_card". The old gen is index 2.
jbyte* _last_cur_val_in_gen; CardValue* _last_cur_val_in_gen;
jbyte _cur_youngergen_card_val; CardValue _cur_youngergen_card_val;
// Number of generations, plus one for lingering PermGen issues in CardTableRS. // Number of generations, plus one for lingering PermGen issues in CardTableRS.
static const int _regions_to_iterate = 3; static const int _regions_to_iterate = 3;
jbyte cur_youngergen_card_val() { CardValue cur_youngergen_card_val() {
return _cur_youngergen_card_val; return _cur_youngergen_card_val;
} }
void set_cur_youngergen_card_val(jbyte v) { void set_cur_youngergen_card_val(CardValue v) {
_cur_youngergen_card_val = v; _cur_youngergen_card_val = v;
} }
bool is_prev_youngergen_card_val(jbyte v) { bool is_prev_youngergen_card_val(CardValue v) {
return return
youngergen_card <= v && youngergen_card <= v &&
v < cur_youngergen_and_prev_nonclean_card && v < cur_youngergen_and_prev_nonclean_card &&
v != _cur_youngergen_card_val; v != _cur_youngergen_card_val;
} }
// Return a youngergen_card_value that is not currently in use. // Return a youngergen_card_value that is not currently in use.
jbyte find_unused_youngergenP_card_value(); CardValue find_unused_youngergenP_card_value();
public: public:
CardTableRS(MemRegion whole_heap, bool scanned_concurrently); CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
@ -117,7 +117,7 @@ public:
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads); void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
void inline_write_ref_field_gc(void* field, oop new_val) { void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = byte_for(field); CardValue* byte = byte_for(field);
*byte = youngergen_card; *byte = youngergen_card;
} }
void write_ref_field_gc_work(void* field, oop new_val) { void write_ref_field_gc_work(void* field, oop new_val) {
@ -140,32 +140,32 @@ public:
void invalidate_or_clear(Generation* old_gen); void invalidate_or_clear(Generation* old_gen);
bool is_prev_nonclean_card_val(jbyte v) { bool is_prev_nonclean_card_val(CardValue v) {
return return
youngergen_card <= v && youngergen_card <= v &&
v <= cur_youngergen_and_prev_nonclean_card && v <= cur_youngergen_and_prev_nonclean_card &&
v != _cur_youngergen_card_val; v != _cur_youngergen_card_val;
} }
static bool youngergen_may_have_been_dirty(jbyte cv) { static bool youngergen_may_have_been_dirty(CardValue cv) {
return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card; return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
} }
// *** Support for parallel card scanning. // *** Support for parallel card scanning.
// dirty and precleaned are equivalent wrt younger_refs_iter. // dirty and precleaned are equivalent wrt younger_refs_iter.
static bool card_is_dirty_wrt_gen_iter(jbyte cv) { static bool card_is_dirty_wrt_gen_iter(CardValue cv) {
return cv == dirty_card || cv == precleaned_card; return cv == dirty_card || cv == precleaned_card;
} }
// Returns "true" iff the value "cv" will cause the card containing it // Returns "true" iff the value "cv" will cause the card containing it
// to be scanned in the current traversal. May be overridden by // to be scanned in the current traversal. May be overridden by
// subtypes. // subtypes.
bool card_will_be_scanned(jbyte cv); bool card_will_be_scanned(CardValue cv);
// Returns "true" iff the value "cv" may have represented a dirty card at // Returns "true" iff the value "cv" may have represented a dirty card at
// some point. // some point.
bool card_may_have_been_dirty(jbyte cv); bool card_may_have_been_dirty(CardValue cv);
// Iterate over the portion of the card-table which covers the given // Iterate over the portion of the card-table which covers the given
// region mr in the given space and apply cl to any dirty sub-regions // region mr in the given space and apply cl to any dirty sub-regions
@ -185,7 +185,7 @@ public:
// covered region. Each entry of these arrays is the lowest non-clean // covered region. Each entry of these arrays is the lowest non-clean
// card of the corresponding chunk containing part of an object from the // card of the corresponding chunk containing part of an object from the
// previous chunk, or else NULL. // previous chunk, or else NULL.
typedef jbyte* CardPtr; typedef CardValue* CardPtr;
typedef CardPtr* CardArr; typedef CardPtr* CardArr;
CardArr* _lowest_non_clean; CardArr* _lowest_non_clean;
size_t* _lowest_non_clean_chunk_size; size_t* _lowest_non_clean_chunk_size;
@ -199,15 +199,19 @@ class ClearNoncleanCardWrapper: public MemRegionClosure {
DirtyCardToOopClosure* _dirty_card_closure; DirtyCardToOopClosure* _dirty_card_closure;
CardTableRS* _ct; CardTableRS* _ct;
bool _is_par; bool _is_par;
public:
typedef CardTable::CardValue CardValue;
private: private:
// Clears the given card, return true if the corresponding card should be // Clears the given card, return true if the corresponding card should be
// processed. // processed.
inline bool clear_card(jbyte* entry); inline bool clear_card(CardValue* entry);
// Work methods called by the clear_card() // Work methods called by the clear_card()
inline bool clear_card_serial(jbyte* entry); inline bool clear_card_serial(CardValue* entry);
inline bool clear_card_parallel(jbyte* entry); inline bool clear_card_parallel(CardValue* entry);
// check alignment of pointer // check alignment of pointer
bool is_word_aligned(jbyte* entry); bool is_word_aligned(CardValue* entry);
public: public:
ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par); ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);

@ -27,6 +27,7 @@
#include "gc/shared/ageTable.hpp" #include "gc/shared/ageTable.hpp"
#include "gc/shared/cardGeneration.hpp" #include "gc/shared/cardGeneration.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableRS.hpp" #include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genCollectedHeap.hpp"
@ -119,12 +120,12 @@
nonstatic_field(CardTable, _last_valid_index, const size_t) \ nonstatic_field(CardTable, _last_valid_index, const size_t) \
nonstatic_field(CardTable, _page_size, const size_t) \ nonstatic_field(CardTable, _page_size, const size_t) \
nonstatic_field(CardTable, _byte_map_size, const size_t) \ nonstatic_field(CardTable, _byte_map_size, const size_t) \
nonstatic_field(CardTable, _byte_map, jbyte*) \ nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \
nonstatic_field(CardTable, _cur_covered_regions, int) \ nonstatic_field(CardTable, _cur_covered_regions, int) \
nonstatic_field(CardTable, _covered, MemRegion*) \ nonstatic_field(CardTable, _covered, MemRegion*) \
nonstatic_field(CardTable, _committed, MemRegion*) \ nonstatic_field(CardTable, _committed, MemRegion*) \
nonstatic_field(CardTable, _guard_region, MemRegion) \ nonstatic_field(CardTable, _guard_region, MemRegion) \
nonstatic_field(CardTable, _byte_map_base, jbyte*) \ nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \
nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \ nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \ nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
\ \
@ -217,6 +218,7 @@
/* Miscellaneous other GC types */ \ /* Miscellaneous other GC types */ \
\ \
declare_toplevel_type(AgeTable) \ declare_toplevel_type(AgeTable) \
declare_toplevel_type(CardTable::CardValue) \
declare_toplevel_type(Generation::StatRecord) \ declare_toplevel_type(Generation::StatRecord) \
declare_toplevel_type(GenerationSpec) \ declare_toplevel_type(GenerationSpec) \
declare_toplevel_type(HeapWord) \ declare_toplevel_type(HeapWord) \

@ -93,7 +93,7 @@ class CompilerToVM {
static int _max_oop_map_stack_offset; static int _max_oop_map_stack_offset;
static int _fields_annotations_base_offset; static int _fields_annotations_base_offset;
static jbyte* cardtable_start_address; static CardTable::CardValue* cardtable_start_address;
static int cardtable_shift; static int cardtable_shift;
static int vm_page_size; static int vm_page_size;

@ -24,6 +24,7 @@
// no precompiled headers // no precompiled headers
#include "ci/ciUtilities.hpp" #include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/oopFactory.hpp" #include "memory/oopFactory.hpp"
#include "oops/objArrayOop.inline.hpp" #include "oops/objArrayOop.inline.hpp"
#include "jvmci/jvmciRuntime.hpp" #include "jvmci/jvmciRuntime.hpp"
@ -63,7 +64,7 @@ HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
int CompilerToVM::Data::_max_oop_map_stack_offset; int CompilerToVM::Data::_max_oop_map_stack_offset;
int CompilerToVM::Data::_fields_annotations_base_offset; int CompilerToVM::Data::_fields_annotations_base_offset;
jbyte* CompilerToVM::Data::cardtable_start_address; CardTable::CardValue* CompilerToVM::Data::cardtable_start_address;
int CompilerToVM::Data::cardtable_shift; int CompilerToVM::Data::cardtable_shift;
int CompilerToVM::Data::vm_page_size; int CompilerToVM::Data::vm_page_size;
@ -126,7 +127,7 @@ void CompilerToVM::Data::initialize(TRAPS) {
BarrierSet* bs = BarrierSet::barrier_set(); BarrierSet* bs = BarrierSet::barrier_set();
if (bs->is_a(BarrierSet::CardTableBarrierSet)) { if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
jbyte* base = ci_card_table_address(); CardTable::CardValue* base = ci_card_table_address();
assert(base != NULL, "unexpected byte_map_base"); assert(base != NULL, "unexpected byte_map_base");
cardtable_start_address = base; cardtable_start_address = base;
cardtable_shift = CardTable::card_shift; cardtable_shift = CardTable::card_shift;