Mirror of https://github.com/openjdk/jdk.git
8238854: Remove superfluous C heap allocation failure checks

Reviewed-by: kbarrett, sjohanss

parent: 00484e9446
commit: 78f58c3e39

14 changed files with 7 additions and 60 deletions
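Why these checks are superfluous: HotSpot's default C-heap allocators never return NULL. NEW_C_HEAP_ARRAY and CHeapObj::operator new allocate with AllocFailStrategy::EXIT_OOM, which reports the failure and terminates the VM, while only the explicit *_RETURN_NULL variants (or NEW_C_HEAP_ARRAY3 called with AllocFailStrategy::RETURN_NULL) can hand back NULL. Each deleted check below either tested the result of an aborting allocation (dead code) or paired a RETURN_NULL allocation with an immediate VM exit, which the aborting flavor already performs. A minimal sketch of the two strategies, with paraphrased bodies rather than the real allocation.cpp code:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Sketch only: models HotSpot's AllocFailStrategy, not the actual sources.
enum AllocFailStrategySketch { EXIT_OOM, RETURN_NULL };

void* allocate_heap_sketch(std::size_t size, AllocFailStrategySketch mode) {
  void* p = std::malloc(size);  // the VM goes through os::malloc with NMT accounting
  if (p == nullptr && mode == EXIT_OOM) {
    std::fprintf(stderr, "Native memory allocation failed\n");
    std::abort();  // vm_exit_out_of_memory(...) in the VM: reports, never returns
  }
  return p;  // nullptr is only ever observed by RETURN_NULL callers
}

NEW_C_HEAP_ARRAY(type, count, flags) takes the EXIT_OOM path, so a NULL check after it can never fire; the hunks below delete exactly those branches.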
@@ -1799,8 +1799,8 @@ jint G1CollectedHeap::initialize() {
   // Create the G1ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
-  if (_cm == NULL || !_cm->completed_initialization()) {
-    vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
+  if (!_cm->completed_initialization()) {
+    vm_shutdown_during_initialization("Could not initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
   _cm_thread = _cm->cm_thread();
@@ -72,11 +72,7 @@ jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
   _cr = cr;
   _num_max_threads = num_max_threads;

-  _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
-  if (_threads == NULL) {
-    vm_shutdown_during_initialization("Could not allocate thread holder array.");
-    return JNI_ENOMEM;
-  }
+  _threads = NEW_C_HEAP_ARRAY(G1ConcurrentRefineThread*, num_max_threads, mtGC);

   for (uint i = 0; i < num_max_threads; i++) {
     if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
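The hunk above is the one spot where the old code could fail over to a clean JNI_ENOMEM shutdown; with plain NEW_C_HEAP_ARRAY the failure is handled inside the allocator instead, and either way VM startup ends. A compact, self-contained model of the call-site change (the function names below are stand-ins for the real allocation.hpp macros):

#include <cstdio>
#include <cstdlib>

// Stand-in for NEW_C_HEAP_ARRAY_RETURN_NULL: may return nullptr.
template <typename T>
T* new_array_return_null(std::size_t n) {
  return static_cast<T*>(std::calloc(n, sizeof(T)));
}

// Stand-in for NEW_C_HEAP_ARRAY: exits on failure, never returns nullptr.
template <typename T>
T* new_array_exit_oom(std::size_t n) {
  T* p = static_cast<T*>(std::calloc(n, sizeof(T)));
  if (p == nullptr) {
    std::fprintf(stderr, "out of native memory\n");  // vm_exit_out_of_memory in the VM
    std::exit(1);
  }
  return p;
}

int main() {
  // Old pattern: opt into NULL, then shut down by hand.
  int* threads_old = new_array_return_null<int>(8);
  if (threads_old == nullptr) {
    std::fprintf(stderr, "Could not allocate thread holder array.\n");
    return 1;  // vm_shutdown_during_initialization(...) + JNI_ENOMEM in the VM
  }

  // New pattern: the allocator exits on failure, so no NULL branch remains.
  int* threads_new = new_array_exit_oom<int>(8);

  std::free(threads_old);
  std::free(threads_new);
  return 0;
}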
@@ -303,13 +299,6 @@ G1ConcurrentRefine* G1ConcurrentRefine::create(jint* ecode) {
                                              yellow_zone,
                                              red_zone,
                                              min_yellow_zone_size);
-
-  if (cr == NULL) {
-    *ecode = JNI_ENOMEM;
-    vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
-    return NULL;
-  }
-
   *ecode = cr->initialize();
   return cr;
 }
@@ -92,14 +92,7 @@ OtherRegionsTable::OtherRegionsTable(Mutex* m) :
     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   }

-  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-
-  if (_fine_grain_regions == NULL) {
-    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
-                          "Failed to allocate _fine_grain_entries.");
-  }
-
+  _fine_grain_regions = NEW_C_HEAP_ARRAY(PerRegionTablePtr, _max_fine_entries, mtGC);
   for (size_t i = 0; i < _max_fine_entries; i++) {
     _fine_grain_regions[i] = NULL;
   }
@@ -60,8 +60,7 @@ void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
   assert(_init_gen_size != 0, "Should have a finite size");
   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
   if (!_virtual_space->expand_by(_init_gen_size)) {
-    vm_exit_during_initialization("Could not reserve enough space for "
-                                  "object heap");
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
   }
 }

@@ -78,16 +78,12 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);

   _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_oop_task_queues != NULL, "Could not allocate oop task queues");
   _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
-  guarantee(_objarray_task_queues != NULL, "Could not allocate objarray task queues");
   _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_task_queues != NULL, "Could not allocate region task queues");

   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
-    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
     _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
     region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
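The guarantee(... != NULL) calls removed above and below were equally unreachable: the task queue sets and managers involved derive from CHeapObj (asserted here from the class hierarchy, not from this diff), whose operator new also allocates with the exit-on-OOM strategy. A toy model of why a caller can never observe NULL from such a `new`:

#include <cstdio>
#include <cstdlib>
#include <new>

// Toy model of CHeapObj: operator new exits the process on failure, so the
// result of a `new` expression is never NULL from the caller's point of view.
struct CHeapObjModel {
  void* operator new(std::size_t size) {
    void* p = std::malloc(size);
    if (p == nullptr) {
      std::fprintf(stderr, "Out of C heap\n");  // vm_exit_out_of_memory(...) in the VM
      std::exit(1);                             // never returns NULL to the caller
    }
    return p;
  }
  void operator delete(void* p) { std::free(p); }
};

struct TaskQueueSetModel : CHeapObjModel {
  int queues[4];
};

int main() {
  TaskQueueSetModel* q = new TaskQueueSetModel();
  // A guarantee(q != NULL, "...") here would be dead code.
  delete q;
  return 0;
}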
@@ -96,8 +92,6 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   // The VMThread gets its own ParCompactionManager, which is not available
   // for work stealing.
   _manager_array[parallel_gc_threads] = new ParCompactionManager();
-  guarantee(_manager_array[parallel_gc_threads] != NULL,
-            "Could not create ParCompactionManager");
   assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
          "Not initialized?");

@@ -135,10 +135,6 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
   //

   _object_space = new MutableSpace(virtual_space()->alignment());
-
-  if (_object_space == NULL)
-    vm_exit_during_initialization("Could not allocate an old gen space");
-
   object_space()->initialize(cmr,
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
@@ -60,10 +60,8 @@ void PSPromotionManager::initialize() {
   // and make sure that the first instance starts at a cache line.
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
-  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
@@ -74,7 +72,6 @@ void PSPromotionManager::initialize() {

   assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
   _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
-  guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set");
   _preserved_marks_set->init(promotion_manager_num);
   for (uint i = 0; i < promotion_manager_num; i += 1) {
     _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
@@ -88,10 +88,6 @@ void PSYoungGen::initialize_work() {
   _from_space = new MutableSpace(virtual_space()->alignment());
   _to_space = new MutableSpace(virtual_space()->alignment());

-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a young gen space");
-  }
-
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                            _max_gen_size, _virtual_space);
@@ -168,10 +168,6 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
   _from_space = new ContiguousSpace();
   _to_space = new ContiguousSpace();

-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a new gen space");
-  }
-
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
   // These values are exported as performance counters.
@@ -53,9 +53,6 @@ CardGeneration::CardGeneration(ReservedSpace rs,
                  heap_word_size(initial_byte_size));
   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
   _rs->resize_covered_region(committed_mr);
-  if (_bts == NULL) {
-    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
-  }

   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than on generation,
@@ -60,7 +60,7 @@ CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :

   assert(card_size <= 512, "card_size must be less than 512"); // why?

-  _covered = new MemRegion[_max_covered_regions];
+  _covered = new MemRegion[_max_covered_regions];
   if (_covered == NULL) {
     vm_exit_during_initialization("Could not allocate card table covered region set.");
   }
@@ -579,11 +579,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
   // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
   // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
   uint max_gens = 2;
-  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(CardValue, max_gens + 1,
-                                           mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_last_cur_val_in_gen == NULL) {
-    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
-  }
+  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY(CardValue, max_gens + 1, mtGC);
   for (uint i = 0; i < max_gens + 1; i++) {
     _last_cur_val_in_gen[i] = clean_card_val();
   }
@@ -79,9 +79,6 @@ void Generation::ref_processor_init() {
   assert(!_reserved.is_empty(), "empty generation?");
   _span_based_discoverer.set_span(_reserved);
   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
-  if (_ref_processor == NULL) {
-    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
-  }
 }

 void Generation::print() const { print_on(tty); }
@@ -64,9 +64,6 @@ void ReferenceProcessor::init_statics() {
   } else {
     _default_soft_ref_policy = new LRUCurrentHeapPolicy();
   }
-  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
-    vm_exit_during_initialization("Could not allocate reference policy object");
-  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecognized RefDiscoveryPolicy");