8196876: OopStorage::assert_at_safepoint clashes with assert_at_safepoint macros in g1CollectedHeap.hpp

Add shared safepoint state assertion macros.

Reviewed-by: coleenp, eosterlund
Kim Barrett 2018-03-03 23:56:08 -05:00
parent efcc8d2558
commit d1bd39385f
11 changed files with 47 additions and 44 deletions
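
To make the name clash and its resolution concrete, here is a minimal standalone sketch, not JDK code: SafepointSynchronize and Thread are simplified stand-ins, and the macro bodies are condensed from the hunks below. The point is the layering this change introduces: generic safepoint asserts move to a shared header (runtime/safepoint.hpp), and G1 keeps a stronger assert_at_safepoint_on_vm_thread() built on top of them, so OopStorage can call a plain assert_at_safepoint() without colliding with a G1 macro that takes an argument.

// Minimal standalone sketch, not JDK code; stand-in types only.
#include <cassert>

struct SafepointSynchronize {
  static bool is_at_safepoint() { return true; }            // stand-in
};
struct Thread {
  static Thread* current() { static Thread t; return &t; }  // stand-in
  bool is_VM_thread() const { return true; }                 // stand-in
};

// Shared helpers (added to runtime/safepoint.hpp in this commit):
#define assert_at_safepoint() \
  assert(SafepointSynchronize::is_at_safepoint() && "should be at a safepoint")
#define assert_not_at_safepoint() \
  assert(!SafepointSynchronize::is_at_safepoint() && "should not be at a safepoint")

// G1's stronger variant (g1CollectedHeap.hpp), layered on the shared macro:
#define assert_at_safepoint_on_vm_thread()                                   \
  do {                                                                       \
    assert_at_safepoint();                                                   \
    assert(Thread::current()->is_VM_thread() && "should be the VM thread");  \
  } while (0)

int main() {
  assert_at_safepoint();               // replaces G1's assert_at_safepoint(false)
  assert_at_safepoint_on_vm_thread();  // replaces assert_at_safepoint(true /* should_be_vm_thread */)
  return 0;
}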

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,7 +250,7 @@ bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size
 }
 
 bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
-  assert_at_safepoint(false);
+  assert_at_safepoint();
   return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
 }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
 }
 
 void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   _survivor_is_full = false;
   _old_is_full = false;

View file

@@ -566,7 +566,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
 }
 
 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   if (_archive_allocator == NULL) {
     _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
   }
@@ -580,7 +580,7 @@ bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
 }
 
 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
   if (is_archive_alloc_too_large(word_size)) {
     return NULL;
@@ -590,7 +590,7 @@ HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                               size_t end_alignment_in_bytes) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
   // Call complete_archive to do the real work, filling in the MemRegion
@@ -983,7 +983,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                            bool expect_null_mutator_alloc_region) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
@@ -1154,7 +1154,7 @@ void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_tr
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                          bool clear_all_soft_refs) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   if (GCLocker::check_active_before_gc()) {
     // Full GC was not completed.
@@ -1295,7 +1295,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                                      bool* succeeded) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   // Attempts to allocate followed by Full GC.
   HeapWord* result =
@@ -1347,7 +1347,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
 // allocated block, or else "NULL".
 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   _verifier->verify_region_sets_optional();
@@ -2817,7 +2817,7 @@ void G1CollectedHeap::start_new_collection_set() {
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   guarantee(!is_gc_active(), "collection is not reentrant");
   if (GCLocker::check_active_before_gc()) {
@@ -4847,7 +4847,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
 };
 
 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   if (!G1EagerReclaimHumongousObjects ||
       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
@@ -5003,7 +5003,7 @@ public:
 };
 
 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   if (!free_list_only) {
     TearDownRegionSetsClosure cl(&_old_set);
@@ -5077,7 +5077,7 @@ public:
 };
 
 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert_at_safepoint_on_vm_thread();
   if (!free_list_only) {
     _eden.clear();

View file

@@ -364,17 +364,11 @@ private:
                                       "should not be at a safepoint")); \
   } while (0)
 
-#define assert_at_safepoint(_should_be_vm_thread_) \
+#define assert_at_safepoint_on_vm_thread() \
   do { \
-    assert(SafepointSynchronize::is_at_safepoint() && \
-           ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
-           heap_locking_asserts_params("should be at a safepoint")); \
-  } while (0)
-
-#define assert_not_at_safepoint() \
-  do { \
-    assert(!SafepointSynchronize::is_at_safepoint(), \
-           heap_locking_asserts_params("should not be at a safepoint")); \
+    assert_at_safepoint(); \
+    assert(Thread::current_or_null() != NULL, "no current thread"); \
+    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
   } while (0)
 
 protected:

View file

@@ -85,12 +85,12 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
 }
 
 inline void G1CollectedHeap::reset_gc_time_stamp() {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   _gc_time_stamp = 0;
 }
 
 inline void G1CollectedHeap::increment_gc_time_stamp() {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   ++_gc_time_stamp;
 }

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@ G1CollectionSet::~G1CollectionSet() {
 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                           uint survivor_cset_region_length) {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   _eden_region_length = eden_cset_region_length;
   _survivor_region_length = survivor_cset_region_length;
@@ -103,7 +103,7 @@ void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectionSet::add_old_region(HeapRegion* hr) {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   assert(_inc_build_state == Active, "Precondition");
   assert(hr->is_old(), "the region should be old");
@@ -167,7 +167,7 @@ void G1CollectionSet::finalize_incremental_building() {
 }
 
 void G1CollectionSet::clear() {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   _collection_set_cur_length = 0;
 }
@@ -314,7 +314,7 @@ public:
 };
 
 bool G1CollectionSet::verify_young_ages() {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   G1VerifyYoungAgesClosure cl;
   iterate(&cl);
@@ -541,7 +541,7 @@ public:
 };
 
 void G1CollectionSet::verify_young_cset_indices() const {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
   iterate(&cl);

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop cons
 #ifndef PRODUCT
 template<typename Fn>
 inline void G1CMMarkStack::iterate(Fn fn) const {
-  assert_at_safepoint(true);
+  assert_at_safepoint_on_vm_thread();
   size_t num_chunks = 0;

View file

@@ -283,12 +283,6 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
   return NULL;
 }
 
-#ifdef ASSERT
-void OopStorage::assert_at_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif // ASSERT
-
 //////////////////////////////////////////////////////////////////////////////
 // Allocation
 //
@@ -728,7 +722,9 @@ void OopStorage::BasicParState::update_iteration_state(bool value) {
 }
 
 void OopStorage::BasicParState::ensure_iteration_started() {
-  if (!_concurrent) assert_at_safepoint();
+  if (!_concurrent) {
+    assert_at_safepoint();
+  }
   assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
   // Ensure _next_block is not the not_started_marker, setting it to
   // the _active_head to start the iteration if necessary.
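
A side note on the hunk above, hedged because it leans on HotSpot's usual debug-build conventions rather than anything shown in this diff: the removed OopStorage::assert_at_safepoint() was declared NOT_DEBUG_RETURN, so it compiled away in product builds, and the shared assert_at_safepoint() macro keeps that property because assert itself is debug-only. A standalone illustration, not JDK code:

// Standalone illustration: an assert-based macro vanishes in release builds
// (NDEBUG) just as a NOT_DEBUG_RETURN helper did.  Stand-in types only.
#include <cassert>

struct SafepointSynchronize {
  static bool is_at_safepoint() { return true; }   // stand-in
};

#define assert_at_safepoint() \
  assert(SafepointSynchronize::is_at_safepoint() && "should be at a safepoint")

// Hypothetical sketch of the ensure_iteration_started() shape above.
void ensure_iteration_started_sketch(bool concurrent) {
  if (!concurrent) {
    assert_at_safepoint();   // expands to nothing when NDEBUG is defined
  }
  // ... set up iteration state ...
}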

View file

@@ -241,8 +241,6 @@ private:
   void delete_empty_block(const Block& block);
   bool reduce_deferred_updates();
 
-  static void assert_at_safepoint() NOT_DEBUG_RETURN;
-
   template<typename F, typename Storage>
   static bool iterate_impl(F f, Storage* storage);

View file

@@ -30,6 +30,7 @@
 #include "metaprogramming/conditional.hpp"
 #include "metaprogramming/isConst.hpp"
 #include "oops/oop.hpp"
+#include "runtime/safepoint.hpp"
 #include "utilities/count_trailing_zeros.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"

View file

@@ -209,6 +209,20 @@ public:
   }
 };
 
+// Some helper assert macros for safepoint checks.
+#define assert_at_safepoint() \
+  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint")
+
+#define assert_at_safepoint_msg(...) \
+  assert(SafepointSynchronize::is_at_safepoint(), __VA_ARGS__)
+
+#define assert_not_at_safepoint() \
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint")
+
+#define assert_not_at_safepoint_msg(...) \
+  assert(!SafepointSynchronize::is_at_safepoint(), __VA_ARGS__)
+
 // State class for a thread suspended at a safepoint
 class ThreadSafepointState: public CHeapObj<mtInternal> {
  public:
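
Finally, a hedged usage sketch for the _msg variants added above: they simply forward their arguments to assert(), so a caller can supply its own message (printf-style in HotSpot's assert; a plain string in this standalone stand-in, which is not JDK code and uses hypothetical caller functions).

// Standalone usage sketch; only the macro shapes come from the diff above.
#include <cassert>

struct SafepointSynchronize {
  static bool is_at_safepoint() { return true; }   // stand-in
};

// Stand-in definitions: the real macros pass __VA_ARGS__ straight to
// HotSpot's assert(cond, ...); plain cassert needs the && form instead.
#define assert_at_safepoint_msg(...) \
  assert(SafepointSynchronize::is_at_safepoint() && (__VA_ARGS__))
#define assert_not_at_safepoint_msg(...) \
  assert(!SafepointSynchronize::is_at_safepoint() && (__VA_ARGS__))

// Hypothetical caller that must only run inside a safepoint.
void update_shared_state() {
  assert_at_safepoint_msg("update_shared_state must run inside a safepoint");
  // ... mutate data that only safepoint operations may touch ...
}

// Hypothetical caller that must never run inside a safepoint.
void concurrent_phase() {
  assert_not_at_safepoint_msg("concurrent phase must not run at a safepoint");
  // ... concurrent work ...
}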