Antonios Printezis 2008-08-21 23:36:31 -04:00
commit 615777b6da
2482 changed files with 117959 additions and 31338 deletions

@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,20 +136,18 @@ class Space: public CHeapObj {
   // any purpose. The "mr" arguments gives the bounds of the space, and
   // the "clear_space" argument should be true unless the memory in "mr" is
   // known to be zeroed.
-  virtual void initialize(MemRegion mr, bool clear_space);
-
-  // Sets the bounds (bottom and end) of the current space to those of "mr."
-  void set_bounds(MemRegion mr);
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
   // The "clear" method must be called on a region that may have
   // had allocation performed in it, but is now to be considered empty.
-  virtual void clear();
+  virtual void clear(bool mangle_space);
 
   // For detecting GC bugs. Should only be called at GC boundaries, since
   // some unused space may be used as scratch space during GC's.
   // Default implementation does nothing. We also call this when expanding
   // a space to satisfy an allocation request. See bug #4668531
   virtual void mangle_unused_area() {}
+  virtual void mangle_unused_area_complete() {}
   virtual void mangle_region(MemRegion mr) {}
 
   // Testers
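The bool mangle_space parameter added here makes debug-build mangling an explicit decision at each call site instead of a side effect of clearing. A minimal sketch of a caller under the new API; grow_space is hypothetical, and the SpaceDecorator constants are the boolean flags this changeset introduces for exactly these parameters (see the sketch at the end of this page):

// Hypothetical caller: hand a space memory that is not known to be zeroed,
// and ask debug builds to mangle whatever remains unused.
void grow_space(Space* s, MemRegion committed) {
  s->initialize(committed,
                SpaceDecorator::Clear,    // memory in "committed" may be dirty
                SpaceDecorator::Mangle);  // debug builds: fill unused words
}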
@@ -376,8 +374,8 @@ public:
   CompactibleSpace() :
     _compaction_top(NULL), _next_compaction_space(NULL) {}
 
-  virtual void initialize(MemRegion mr, bool clear_space);
-  virtual void clear();
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+  virtual void clear(bool mangle_space);
 
   // Used temporarily during a compaction phase to hold the value
   // top should have when compaction is complete.
@@ -661,100 +659,102 @@ protected:
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
       debug_only(prev_q = q); \
       q += size; \
     } else { \
       /* q is not a live object, so its mark should point at the next \
        * live object */ \
       debug_only(prev_q = q); \
       q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
       assert(q > prev_q, "we should be moving forward through memory"); \
     } \
   } \
 \
   assert(q == t, "just checking"); \
 }
 
 #define SCAN_AND_COMPACT(obj_size) { \
   /* Copy all live objects to their new location \
    * Used by MarkSweep::mark_sweep_phase4() */ \
 \
   HeapWord* q = bottom(); \
   HeapWord* const t = _end_of_live; \
   debug_only(HeapWord* prev_q = NULL); \
 \
   if (q < t && _first_dead > q && \
       !oop(q)->is_gc_marked()) { \
     debug_only( \
     /* we have a chunk of the space which hasn't moved and we've reinitialized \
      * the mark word during the previous pass, so we can't use is_gc_marked for \
      * the traversal. */ \
     HeapWord* const end = _first_dead; \
 \
     while (q < end) { \
       size_t size = obj_size(q); \
       assert(!oop(q)->is_gc_marked(), \
              "should be unmarked (special dense prefix handling)"); \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
       debug_only(prev_q = q); \
       q += size; \
     } \
     ) /* debug_only */ \
 \
     if (_first_dead == t) { \
       q = t; \
     } else { \
       /* $$$ Funky */ \
       q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
     } \
   } \
 \
   const intx scan_interval = PrefetchScanIntervalInBytes; \
   const intx copy_interval = PrefetchCopyIntervalInBytes; \
   while (q < t) { \
     if (!oop(q)->is_gc_marked()) { \
       /* mark is pointer to next marked oop */ \
       debug_only(prev_q = q); \
       q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
       assert(q > prev_q, "we should be moving forward through memory"); \
     } else { \
       /* prefetch beyond q */ \
       Prefetch::read(q, scan_interval); \
 \
       /* size and destination */ \
       size_t size = obj_size(q); \
       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
 \
       /* prefetch beyond compaction_top */ \
       Prefetch::write(compaction_top, copy_interval); \
 \
       /* copy object and reinit its mark */ \
       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
                                                             compaction_top)); \
       assert(q != compaction_top, "everything in this pass should be moving"); \
       Copy::aligned_conjoint_words(q, compaction_top, size); \
       oop(compaction_top)->init_mark(); \
       assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
 \
       debug_only(prev_q = q); \
       q += size; \
     } \
   } \
 \
   /* Let's remember if we were empty before we did the compaction. */ \
   bool was_empty = used_region().is_empty(); \
   /* Reset space after compaction is complete */ \
   reset_after_compaction(); \
   /* We do this clear, below, since it has overloaded meanings for some */ \
   /* space subtypes. For example, OffsetTableContigSpace's that were */ \
   /* compacted into will have had their offset table thresholds updated */ \
   /* continuously, but those that weren't need to have their thresholds */ \
   /* re-initialized. Also mangles unused area for debugging. */ \
   if (used_region().is_empty()) { \
-    if (!was_empty) clear(); \
+    if (!was_empty) clear(SpaceDecorator::Mangle); \
   } else { \
     if (ZapUnusedHeapArea) mangle_unused_area(); \
   } \
 }
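Stripped of prefetching, verification hooks, and the dense-prefix special case, the SCAN_AND_COMPACT loop above reduces to the following standalone sketch. The Obj layout and helpers are illustrative stand-ins, not HotSpot's oop machinery; std::memmove plays the role of Copy::aligned_conjoint_words:

#include <cstddef>
#include <cstring>

struct Obj {
  bool   live;       // set by the mark phase
  char*  forwardee;  // destination chosen earlier; for dead runs, the next live object
  size_t size;       // object size in bytes
};

// Slide every live object left to its precomputed destination, walking
// from bottom() to _end_of_live exactly as the macro's main loop does.
void compact(char* bottom, char* end_of_live) {
  char* q = bottom;
  while (q < end_of_live) {
    Obj* obj = reinterpret_cast<Obj*>(q);
    if (!obj->live) {
      q = obj->forwardee;                          // skip the dead run
    } else {
      std::memmove(obj->forwardee, q, obj->size);  // overlapping "slide left" copy
      q += obj->size;
    }
  }
}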
+class GenSpaceMangler;
+
 // A space in which the free area is contiguous. It therefore supports
 // faster allocation, and compaction.
 class ContiguousSpace: public CompactibleSpace {
@@ -763,17 +763,21 @@ class ContiguousSpace: public CompactibleSpace {
  protected:
   HeapWord* _top;
   HeapWord* _concurrent_iteration_safe_limit;
+  // A helper for mangling the unused area of the space in debug builds.
+  GenSpaceMangler* _mangler;
+
+  GenSpaceMangler* mangler() { return _mangler; }
 
   // Allocation helpers (return NULL if full).
   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
 
  public:
-  ContiguousSpace() :
-    _top(NULL),
-    _concurrent_iteration_safe_limit(NULL) {}
+  ContiguousSpace();
+  ~ContiguousSpace();
 
-  virtual void initialize(MemRegion mr, bool clear_space);
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+  virtual void clear(bool mangle_space);
 
   // Accessors
   HeapWord* top() const { return _top; }
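ContiguousSpace trades its inline constructor for an out-of-line constructor/destructor pair, presumably so it can own the new _mangler helper. A hedged guess at the matching space.cpp change; the GenSpaceMangler constructor signature is an assumption, not shown in this hunk:

ContiguousSpace::ContiguousSpace() :
  _top(NULL),
  _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);  // assumed constructor argument
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}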
@@ -782,15 +786,32 @@ class ContiguousSpace: public CompactibleSpace {
   virtual void set_saved_mark() { _saved_mark_word = top(); }
   void reset_saved_mark() { _saved_mark_word = bottom(); }
 
-  virtual void clear();
-
   WaterMark bottom_mark() { return WaterMark(this, bottom()); }
   WaterMark top_mark() { return WaterMark(this, top()); }
   WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
   bool saved_mark_at_top() const { return saved_mark_word() == top(); }
 
-  void mangle_unused_area();
-  void mangle_region(MemRegion mr);
+  // In debug mode mangle (write it with a particular bit
+  // pattern) the unused part of a space.
+
+  // Used to save the an address in a space for later use during mangling.
+  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
+  // Used to save the space's current top for later use during mangling.
+  void set_top_for_allocations() PRODUCT_RETURN;
+  // Mangle regions in the space from the current top up to the
+  // previously mangled part of the space.
+  void mangle_unused_area() PRODUCT_RETURN;
+  // Mangle [top, end)
+  void mangle_unused_area_complete() PRODUCT_RETURN;
+  // Mangle the given MemRegion.
+  void mangle_region(MemRegion mr) PRODUCT_RETURN;
+
+  // Do some sparse checking on the area that should have been mangled.
+  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
+  // Check the complete area that should have been mangled.
+  // This code may be NULL depending on the macro DEBUG_MANGLING.
+  void check_mangled_unused_area_complete() PRODUCT_RETURN;
 
   // Size computations: sizes in bytes.
   size_t capacity() const { return byte_size(bottom(), end()); }
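Taken together, the PRODUCT_RETURN hooks above imply a debug-only protocol: record the space's top before a collection disturbs it, remangle only what actually needs it afterwards, and spot-check mangled memory when it is later handed out. A sketch of how a collector might drive the API; these call sites are illustrative, not code from this commit:

// Debug-build flow around a collection of a contiguous space (illustrative).
void before_collection(ContiguousSpace* s) {
  s->set_top_for_allocations();            // remember where mangling must resume
}

void after_collection(ContiguousSpace* s) {
  s->mangle_unused_area();                 // remangle from top up to the saved point
}

void on_slow_allocation(ContiguousSpace* s, HeapWord* new_top) {
  s->check_mangled_unused_area(new_top);   // sparse sanity check below the new top
}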
@@ -986,7 +1007,7 @@ class EdenSpace : public ContiguousSpace {
   void set_soft_end(HeapWord* value) { _soft_end = value; }
 
   // Override.
-  void clear();
+  void clear(bool mangle_space);
 
   // Set both the 'hard' and 'soft' limits (_end and _soft_end).
   void set_end(HeapWord* value) {
@@ -1030,8 +1051,7 @@ class OffsetTableContigSpace: public ContiguousSpace {
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
-  virtual void initialize(MemRegion mr, bool clear_space);
-  void clear();
+  void clear(bool mangle_space);
 
   inline HeapWord* block_start_const(const void* p) const;
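For reference, the clear(bool mangle_space) overrides above are driven by named boolean constants, as in the SCAN_AND_COMPACT hunk's clear(SpaceDecorator::Mangle). A sketch of the decorator's likely shape, consistent with its uses in this diff; the exact contents of spaceDecorator.hpp are assumed, not shown here:

// Assumed shape of the flag holder used at initialize()/clear() call sites.
class SpaceDecorator: public AllStatic {
 public:
  // Should initialize() clear the space?
  static const bool Clear      = true;
  static const bool DontClear  = false;
  // Should unused words be mangled (debug builds only)?
  static const bool Mangle     = true;
  static const bool DontMangle = false;
};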