8300087: Replace NULL with nullptr in share/cds/

Reviewed-by: dholmes, iklam
Johan Sjölen 2023-01-20 09:57:20 +00:00
parent 49d60fee49
commit eca64795be
31 changed files with 516 additions and 516 deletions
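
The patch below is mechanical: every use of the NULL macro in share/cds/ becomes the C++11 nullptr keyword. As a rough illustration of why nullptr is preferred (this sketch is standalone and not part of the patch), NULL expands to an integer-like constant and can slip into integer contexts, while nullptr has its own type, std::nullptr_t, which converts only to pointer types:

// nullptr_demo.cpp -- standalone sketch, not part of this patch.
#include <cstddef>
#include <iostream>

// Two overloads a "null" argument could plausibly reach.
static void describe(long)        { std::cout << "integer overload\n"; }
static void describe(const char*) { std::cout << "pointer overload\n"; }

int main() {
  describe(0L);             // a plain integer zero picks the integer overload
  describe(nullptr);        // std::nullptr_t converts only to pointer types,
                            // so the pointer overload is always selected
  // describe(NULL);        // NULL is 0, 0L, or a compiler builtin; this call is
                            // ambiguous or silently picks the integer overload,
                            // so it is left commented out
  const char* p = nullptr;  // intent ("null pointer") is explicit at the use site
  return p == nullptr ? 0 : 1;
}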

src/hotspot/share/cds/heapShared.cpp

@@ -73,16 +73,16 @@ struct ArchivableStaticFieldInfo {
BasicType type;
ArchivableStaticFieldInfo(const char* k, const char* f)
: klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}
: klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
bool valid() {
return klass_name != NULL;
return klass_name != nullptr;
}
};
bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = nullptr;
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
@@ -91,10 +91,10 @@ size_t HeapShared::_total_obj_size;
#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char* _test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static const Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
@@ -113,7 +113,7 @@ static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
{"java/lang/Character$CharacterCache", "archivedCache"},
{"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
{"sun/util/locale/BaseLocale", "constantBaseLocales"},
{NULL, NULL},
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
@@ -123,9 +123,9 @@ static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
{"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
{"jdk/internal/math/FDBigInteger", "archivedCaches"},
#ifndef PRODUCT
{NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
{nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
{NULL, NULL},
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region (full module graph).
@@ -133,14 +133,14 @@ static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
{"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
{"java/lang/Module$ArchivedData", "archivedData"},
{NULL, NULL},
{nullptr, nullptr},
};
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
OopHandle HeapShared::_roots;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = NULL;
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
@@ -177,9 +177,9 @@ static void reset_states(oop obj, TRAPS) {
TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
Symbol* method_sig = vmSymbols::void_method_signature();
while (klass != NULL) {
while (klass != nullptr) {
Method* method = klass->find_method(method_name, method_sig);
if (method != NULL) {
if (method != nullptr) {
assert(method->is_private(), "must be");
if (log_is_enabled(Debug, cds)) {
ResourceMark rm(THREAD);
@@ -218,16 +218,16 @@ void HeapShared::reset_archived_object_states(TRAPS) {
reset_states(boot_loader(), CHECK);
}
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = nullptr;
oop HeapShared::find_archived_heap_object(oop obj) {
assert(DumpSharedSpaces, "dump-time only");
ArchivedObjectCache* cache = archived_object_cache();
CachedOopInfo* p = cache->get(obj);
if (p != NULL) {
if (p != nullptr) {
return p->_obj;
} else {
return NULL;
return nullptr;
}
}
@@ -237,7 +237,7 @@ int HeapShared::append_root(oop obj) {
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (_pending_roots == NULL) {
if (_pending_roots == nullptr) {
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
@@ -248,14 +248,14 @@ objArrayOop HeapShared::roots() {
if (DumpSharedSpaces) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (!HeapShared::can_write()) {
return NULL;
return nullptr;
}
} else {
assert(UseSharedSpaces, "must be");
}
objArrayOop roots = (objArrayOop)_roots.resolve();
assert(roots != NULL, "should have been initialized");
assert(roots != nullptr, "should have been initialized");
return roots;
}
@@ -264,7 +264,7 @@ oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
if (DumpSharedSpaces) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_pending_roots != NULL, "sanity");
assert(_pending_roots != nullptr, "sanity");
return _pending_roots->at(index);
} else {
assert(UseSharedSpaces, "must be");
@@ -285,7 +285,7 @@ void HeapShared::clear_root(int index) {
oop old = roots()->obj_at(index);
log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
}
roots()->obj_at_put(index, NULL);
roots()->obj_at_put(index, nullptr);
}
}
@@ -295,7 +295,7 @@ oop HeapShared::archive_object(oop obj) {
assert(!obj->is_stackChunk(), "do not archive stack chunks");
oop ao = find_archived_heap_object(obj);
if (ao != NULL) {
if (ao != nullptr) {
// already archived
return ao;
}
@@ -304,11 +304,11 @@ oop HeapShared::archive_object(oop obj) {
if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
p2i(obj), (size_t)obj->size());
return NULL;
return nullptr;
}
oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
if (archived_oop != NULL) {
if (archived_oop != nullptr) {
count_allocation(len);
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
// Reinitialize markword to remove age/marking/locking/etc.
@@ -327,7 +327,7 @@ oop HeapShared::archive_object(oop obj) {
ArchivedObjectCache* cache = archived_object_cache();
CachedOopInfo info = make_cached_oop_info(archived_oop);
cache->put(obj, info);
if (_original_object_table != NULL) {
if (_original_object_table != nullptr) {
_original_object_table->put(archived_oop, obj);
}
mark_native_pointers(obj, archived_oop);
@@ -355,10 +355,10 @@ public:
oop get_oop(Klass* k) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle* handle = get(k);
if (handle != NULL) {
if (handle != nullptr) {
return handle->resolve();
} else {
return NULL;
return nullptr;
}
}
void set_oop(Klass* k, oop o) {
@@ -370,7 +370,7 @@ public:
void remove_oop(Klass* k) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle* handle = get(k);
if (handle != NULL) {
if (handle != nullptr) {
handle->release(Universe::vm_global());
remove(k);
}
@@ -413,9 +413,9 @@ void HeapShared::archive_java_mirrors() {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = _scratch_basic_type_mirrors[i].resolve();
assert(m != NULL, "sanity");
assert(m != nullptr, "sanity");
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
assert(archived_m != NULL, "sanity");
assert(archived_m != nullptr, "sanity");
log_trace(cds, heap, mirror)(
"Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
@@ -426,14 +426,14 @@ void HeapShared::archive_java_mirrors() {
}
GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
assert(klasses != NULL, "sanity");
assert(klasses != nullptr, "sanity");
for (int i = 0; i < klasses->length(); i++) {
Klass* orig_k = klasses->at(i);
oop m = scratch_java_mirror(orig_k);
if (m != NULL) {
if (m != nullptr) {
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
guarantee(archived_m != NULL, "scratch mirrors should not point to any unachivable objects");
guarantee(archived_m != nullptr, "scratch mirrors should not point to any unachivable objects");
buffered_k->set_archived_java_mirror(append_root(archived_m));
ResourceMark rm;
log_trace(cds, heap, mirror)(
@@ -460,7 +460,7 @@ void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
Metadata* ptr = archived_obj->metadata_field_acquire(offset);
if (ptr != NULL) {
if (ptr != nullptr) {
// Set the native pointer to the requested address (at runtime, if the metadata
// is mapped at the default location, it will be at this address).
address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
@@ -520,7 +520,7 @@ void HeapShared::check_enum_obj(int level,
ik->external_name(), fd.name()->as_C_string());
}
oop oop_field = mirror->obj_field(fd.offset());
if (oop_field == NULL) {
if (oop_field == nullptr) {
guarantee(false, "static field %s::%s must not be null",
ik->external_name(), fd.name()->as_C_string());
} else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
@@ -545,7 +545,7 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
}
RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
assert(info != NULL, "sanity");
assert(info != nullptr, "sanity");
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm;
@@ -662,7 +662,7 @@ void HeapShared::copy_roots() {
// However, HeapShared::archive_objects() happens inside a safepoint, so we can't
// allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
// Instead, we have to roll our own alloc/copy routine here.
int length = _pending_roots != NULL ? _pending_roots->length() : 0;
int length = _pending_roots != nullptr ? _pending_roots->length() : 0;
size_t size = objArrayOopDesc::object_size(length);
Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
@@ -689,7 +689,7 @@ void HeapShared::copy_roots() {
//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
// Get the subgraph_info for Klass k. A new subgraph_info is created if
@@ -709,7 +709,7 @@ KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
assert(DumpSharedSpaces, "dump time only");
KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
assert(info != NULL, "must have been initialized");
assert(info != nullptr, "must have been initialized");
return info;
}
@@ -717,7 +717,7 @@ KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
void KlassSubGraphInfo::add_subgraph_entry_field(
int static_field_offset, oop v, bool is_closed_archive) {
assert(DumpSharedSpaces, "dump time only");
if (_subgraph_entry_fields == NULL) {
if (_subgraph_entry_fields == nullptr) {
_subgraph_entry_fields =
new (mtClass) GrowableArray<int>(10, mtClass);
}
@@ -731,7 +731,7 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
assert(DumpSharedSpaces, "dump time only");
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
if (_subgraph_object_klasses == NULL) {
if (_subgraph_object_klasses == nullptr) {
_subgraph_object_klasses =
new (mtClass) GrowableArray<Klass*>(50, mtClass);
}
@@ -787,12 +787,12 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
if (ik->module()->name() == vmSymbols::java_base()) {
assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
return;
}
#ifndef PRODUCT
if (!ik->module()->is_named() && ik->package() == NULL) {
if (!ik->module()->is_named() && ik->package() == nullptr) {
// This class is loaded by ArchiveHeapTestClass
return;
}
@@ -827,8 +827,8 @@ bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
_k = info->klass();
_entry_field_records = NULL;
_subgraph_object_klasses = NULL;
_entry_field_records = nullptr;
_subgraph_object_klasses = nullptr;
_is_full_module_graph = info->is_full_module_graph();
if (_is_full_module_graph) {
@@ -849,7 +849,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
// populate the entry fields
GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
if (entry_fields != NULL) {
if (entry_fields != nullptr) {
int num_entry_fields = entry_fields->length();
assert(num_entry_fields % 2 == 0, "sanity");
_entry_field_records =
@@ -861,7 +861,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
// the Klasses of the objects in the sub-graphs
GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
if (subgraph_object_klasses != NULL) {
if (subgraph_object_klasses != nullptr) {
int num_subgraphs_klasses = subgraph_object_klasses->length();
_subgraph_object_klasses =
ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
@@ -888,7 +888,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
ArchivedKlassSubGraphInfoRecord* record =
(ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
record->init(&info);
@@ -922,7 +922,7 @@ void HeapShared::write_subgraph_info_table() {
writer.dump(&_run_time_subgraph_info_table, "subgraphs");
#ifndef PRODUCT
if (ArchiveHeapTestClass != NULL) {
if (ArchiveHeapTestClass != nullptr) {
size_t len = strlen(ArchiveHeapTestClass) + 1;
Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
@@ -935,13 +935,13 @@ void HeapShared::write_subgraph_info_table() {
}
void HeapShared::serialize_root(SerializeClosure* soc) {
oop roots_oop = NULL;
oop roots_oop = nullptr;
if (soc->reading()) {
soc->do_oop(&roots_oop); // read from archive
assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
// Create an OopHandle only if we have actually mapped or loaded the roots
if (roots_oop != NULL) {
if (roots_oop != nullptr) {
assert(ArchiveHeapLoader::is_fully_available(), "must be");
_roots = OopHandle(Universe::vm_global(), roots_oop);
}
@@ -956,7 +956,7 @@ void HeapShared::serialize_tables(SerializeClosure* soc) {
#ifndef PRODUCT
soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
_test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
setup_test_class(_test_class_name);
}
@@ -1013,7 +1013,7 @@ void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableSt
ArchivableStaticFieldInfo* info = &fields[i];
TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
assert(k != NULL && k->is_shared_boot_class(), "sanity");
assert(k != nullptr && k->is_shared_boot_class(), "sanity");
resolve_classes_for_subgraph_of(current, k);
}
}
@@ -1026,7 +1026,7 @@ void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k)
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
if (record == NULL) {
if (record == nullptr) {
clear_archived_roots_of(k);
}
}
@@ -1049,7 +1049,7 @@ void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k
return;
}
if (record != NULL) {
if (record != nullptr) {
init_archived_fields_for(k, record);
}
}
@@ -1059,13 +1059,13 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
if (!k->is_shared()) {
return NULL;
return nullptr;
}
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
#ifndef PRODUCT
if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
_test_class = k;
_test_class_record = record;
}
@@ -1073,14 +1073,14 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
// Initialize from archived data. Currently this is done only
// during VM initialization time. No lock is needed.
if (record != NULL) {
if (record != nullptr) {
if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm(THREAD);
log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
k->external_name());
}
return NULL;
return nullptr;
}
if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
@@ -1089,7 +1089,7 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
k->external_name());
}
return NULL;
return nullptr;
}
if (log_is_enabled(Info, cds, heap)) {
@@ -1100,13 +1100,13 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
resolve_or_init(k, do_init, CHECK_NULL);
// Load/link/initialize the klasses of the objects in the subgraph.
// NULL class loader is used.
// nullptr class loader is used.
Array<Klass*>* klasses = record->subgraph_object_klasses();
if (klasses != NULL) {
if (klasses != nullptr) {
for (int i = 0; i < klasses->length(); i++) {
Klass* klass = klasses->at(i);
if (!klass->is_shared()) {
return NULL;
return nullptr;
}
resolve_or_init(klass, do_init, CHECK_NULL);
}
@@ -1118,12 +1118,12 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
if (!do_init) {
if (k->class_loader_data() == NULL) {
if (k->class_loader_data() == nullptr) {
Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
}
} else {
assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
ik->initialize(CHECK);
@@ -1141,7 +1141,7 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
// the corresponding fields within the mirror.
oop m = k->java_mirror();
Array<int>* entry_field_records = record->entry_field_records();
if (entry_field_records != NULL) {
if (entry_field_records != nullptr) {
int efr_len = entry_field_records->length();
assert(efr_len % 2 == 0, "sanity");
for (int i = 0; i < efr_len; i += 2) {
@@ -1167,9 +1167,9 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
void HeapShared::clear_archived_roots_of(Klass* k) {
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
if (record != NULL) {
if (record != nullptr) {
Array<int>* entry_field_records = record->entry_field_records();
if (entry_field_records != NULL) {
if (entry_field_records != nullptr) {
int efr_len = entry_field_records->length();
assert(efr_len % 2 == 0, "sanity");
for (int i = 0; i < efr_len; i += 2) {
@@ -1235,7 +1235,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
oop archived = HeapShared::archive_reachable_objects_from(
_level + 1, _subgraph_info, obj, _is_closed_archive);
assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
assert(archived != nullptr, "VM should have exited with unarchivable objects for _level > 1");
assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
if (!_record_klasses_only) {
@@ -1253,14 +1253,14 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
};
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
CachedOopInfo info;
WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
info._subgraph_info = (walker == nullptr) ? nullptr : walker->subgraph_info();
info._referrer = (walker == nullptr) ? nullptr : walker->orig_referencing_obj();
info._obj = orig_obj;
return info;
@@ -1290,7 +1290,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive) {
assert(orig_obj != NULL, "must be");
assert(orig_obj != nullptr, "must be");
assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
@@ -1313,7 +1313,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
}
oop archived_obj = find_archived_heap_object(orig_obj);
if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
if (java_lang_String::is_instance(orig_obj) && archived_obj != nullptr) {
// To save time, don't walk strings that are already archived. They just contain
// pointers to a type array, whose klass doesn't need to be recorded.
return archived_obj;
@@ -1326,11 +1326,11 @@ oop HeapShared::archive_reachable_objects_from(int level,
set_has_been_seen_during_subgraph_recording(orig_obj);
}
bool record_klasses_only = (archived_obj != NULL);
if (archived_obj == NULL) {
bool record_klasses_only = (archived_obj != nullptr);
if (archived_obj == nullptr) {
++_num_new_archived_objs;
archived_obj = archive_object(orig_obj);
if (archived_obj == NULL) {
if (archived_obj == nullptr) {
// Skip archiving the sub-graph referenced from the current entry field.
ResourceMark rm;
log_error(cds, heap)(
@@ -1340,7 +1340,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
if (level == 1) {
// Don't archive a subgraph root that's too big. For archives static fields, that's OK
// as the Java code will take care of initializing this field dynamically.
return NULL;
return nullptr;
} else {
// We don't know how to handle an object that has been archived, but some of its reachable
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
@@ -1353,17 +1353,17 @@ oop HeapShared::archive_reachable_objects_from(int level,
if (Modules::check_module_oop(orig_obj)) {
Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
}
java_lang_Module::set_module_entry(archived_obj, NULL);
java_lang_Module::set_module_entry(archived_obj, nullptr);
} else if (java_lang_ClassLoader::is_instance(orig_obj)) {
// class_data will be restored explicitly at run time.
guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
orig_obj == SystemDictionary::java_system_loader() ||
java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
java_lang_ClassLoader::loader_data(orig_obj) == nullptr, "must be");
java_lang_ClassLoader::release_set_loader_data(archived_obj, nullptr);
}
}
assert(archived_obj != NULL, "must be");
assert(archived_obj != nullptr, "must be");
Klass *orig_k = orig_obj->klass();
subgraph_info->add_subgraph_object_klass(orig_k);
@@ -1436,7 +1436,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
if (af == NULL) {
if (af == nullptr) {
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
klass_name, field_name);
} else {
@@ -1449,7 +1449,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
} else {
// The field contains null, we still need to record the entry point,
// so it can be restored at runtime.
subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
subgraph_info->add_subgraph_entry_field(field_offset, nullptr, false);
}
}
@@ -1486,7 +1486,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
void HeapShared::verify_subgraph_from(oop orig_obj) {
oop archived_obj = find_archived_heap_object(orig_obj);
if (archived_obj == NULL) {
if (archived_obj == nullptr) {
// It's OK for the root of a subgraph to be not archived. See comments in
// archive_reachable_objects_from().
return;
@@ -1513,10 +1513,10 @@ void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
if (is_archived) {
assert(is_archived_object_during_dumptime(obj), "must be");
assert(find_archived_heap_object(obj) == NULL, "must be");
assert(find_archived_heap_object(obj) == nullptr, "must be");
} else {
assert(!is_archived_object_during_dumptime(obj), "must be");
assert(find_archived_heap_object(obj) != NULL, "must be");
assert(find_archived_heap_object(obj) != nullptr, "must be");
}
VerifySharedOopClosure walker(is_archived);
@@ -1550,7 +1550,7 @@ void HeapShared::check_default_subgraph_classes() {
}
}
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;
@@ -1562,7 +1562,7 @@ int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
return _seen_objects_table->get(obj) != NULL;
return _seen_objects_table->get(obj) != nullptr;
}
void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
@@ -1627,7 +1627,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
ResourceMark rm; // for stringStream::as_string() etc.
#ifndef PRODUCT
bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
#else
bool is_test_class = false;
#endif
@@ -1663,7 +1663,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
}
if (ik->package() != NULL) {
if (ik->package() != nullptr) {
// This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
stringStream st;
st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
@@ -1712,10 +1712,10 @@ void HeapShared::init_subgraph_entry_fields(TRAPS) {
void HeapShared::setup_test_class(const char* test_class_name) {
ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
assert(p[num_slots - 2].klass_name == NULL, "must have empty slot that's patched below");
assert(p[num_slots - 1].klass_name == NULL, "must have empty slot that marks the end of the list");
assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");
if (test_class_name != NULL) {
if (test_class_name != nullptr) {
p[num_slots - 2].klass_name = test_class_name;
p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
}
@@ -1725,12 +1725,12 @@ void HeapShared::setup_test_class(const char* test_class_name) {
// during runtime. This may be called before the module system is initialized so
// we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
if (_test_class != NULL) {
if (_test_class != nullptr) {
if (ik == _test_class) {
return true;
}
Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
if (klasses == NULL) {
if (klasses == nullptr) {
return false;
}
@@ -1871,7 +1871,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
virtual void do_oop(oop* p) {
assert(!UseCompressedOops, "sanity");
_num_total_oops ++;
if ((*p) != NULL) {
if ((*p) != nullptr) {
size_t idx = p - (oop*)_start;
_oopmap->set_bit(idx);
if (DumpSharedSpaces) {
@@ -1889,7 +1889,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
address HeapShared::to_requested_address(address dumptime_addr) {
assert(DumpSharedSpaces, "static dump time only");
if (dumptime_addr == NULL || UseCompressedOops) {
if (dumptime_addr == nullptr || UseCompressedOops) {
return dumptime_addr;
}
@@ -1910,7 +1910,7 @@ address HeapShared::to_requested_address(address dumptime_addr) {
intx delta = REQUESTED_BASE - actual_base;
address requested_addr = dumptime_addr + delta;
assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
assert(REQUESTED_BASE != 0 && requested_addr != nullptr, "sanity");
return requested_addr;
}
@@ -1921,7 +1921,7 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
HeapWord* p = region.start();
HeapWord* end = region.end();
FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : nullptr;
int num_objs = 0;
while (p < end) {
@@ -1952,7 +1952,7 @@ ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
for (int i = 0; i < len; i++) {
Metadata** p = _native_pointers->at(i);
if (start <= p && p < end) {
assert(*p != NULL, "must be non-null");
assert(*p != nullptr, "must be non-null");
num_non_null_ptrs ++;
size_t idx = p - start;
oopmap.set_bit(idx);
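
A pattern that recurs in the hunks above is the nullptr-terminated table of ArchivableStaticFieldInfo entries (the {nullptr, nullptr} rows). A minimal standalone sketch of that sentinel idiom, with simplified, hypothetical names rather than the actual HotSpot declarations:

// sentinel_table.cpp -- simplified sketch of the sentinel-terminated tables above;
// FieldRef and entry_fields are illustrative names, not HotSpot code.
#include <cstdio>

struct FieldRef {
  const char* klass_name;   // nullptr in the final, terminating entry
  const char* field_name;
};

static FieldRef entry_fields[] = {
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name",      "KNOWN_NAMES"},
  {nullptr, nullptr},       // sentinel marking the end of the table
};

int main() {
  // Walk entries until the nullptr sentinel, in the same spirit as the
  // dump-time iteration over the archivable static field tables.
  for (const FieldRef* f = entry_fields; f->klass_name != nullptr; f++) {
    std::printf("%s::%s\n", f->klass_name, f->field_name);
  }
  return 0;
}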