8300087: Replace NULL with nullptr in share/cds/

Reviewed-by: dholmes, iklam
This commit is contained in:
Johan Sjölen 2023-01-20 09:57:20 +00:00
parent 49d60fee49
commit eca64795be
31 changed files with 516 additions and 516 deletions

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"
ArchiveBuilder* ArchiveBuilder::_current = NULL;
ArchiveBuilder* ArchiveBuilder::_current = nullptr;
ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
char* newtop = ArchiveBuilder::current()->_ro_region.top();
@@ -95,7 +95,7 @@ void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src
address* field_addr = ref->addr();
assert(src_info->ptrmap_start() < _total_bytes, "sanity");
assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
assert(*field_addr != NULL, "should have checked");
assert(*field_addr != nullptr, "should have checked");
intx field_offset_in_bytes = ((address)field_addr) - src_obj;
DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
@@ -141,17 +141,17 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
}
ArchiveBuilder::ArchiveBuilder() :
_current_dump_space(NULL),
_buffer_bottom(NULL),
_last_verified_top(NULL),
_current_dump_space(nullptr),
_buffer_bottom(nullptr),
_last_verified_top(nullptr),
_num_dump_regions_used(0),
_other_region_used_bytes(0),
_requested_static_archive_bottom(NULL),
_requested_static_archive_top(NULL),
_requested_dynamic_archive_bottom(NULL),
_requested_dynamic_archive_top(NULL),
_mapped_static_archive_bottom(NULL),
_mapped_static_archive_top(NULL),
_requested_static_archive_bottom(nullptr),
_requested_static_archive_top(nullptr),
_requested_dynamic_archive_bottom(nullptr),
_requested_dynamic_archive_top(nullptr),
_mapped_static_archive_bottom(nullptr),
_mapped_static_archive_top(nullptr),
_buffer_to_requested_delta(0),
_rw_region("rw", MAX_SHARED_DELTA),
_ro_region("ro", MAX_SHARED_DELTA),
@@ -169,13 +169,13 @@ ArchiveBuilder::ArchiveBuilder() :
_symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
_special_refs = new (mtClassShared) GrowableArray<SpecialRefInfo>(24 * K, mtClassShared);
assert(_current == NULL, "must be");
assert(_current == nullptr, "must be");
_current = this;
}
ArchiveBuilder::~ArchiveBuilder() {
assert(_current == this, "must be");
_current = NULL;
_current = nullptr;
clean_up_src_obj_table();
@@ -207,7 +207,7 @@ public:
};
bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
if (ref->obj() == NULL) {
if (ref->obj() == nullptr) {
return false;
}
if (get_follow_mode(ref) != make_a_copy) {
@@ -430,7 +430,7 @@ public:
};
virtual void do_pending_ref(Ref* ref) {
if (ref->obj() != NULL) {
if (ref->obj() != nullptr) {
_builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
}
}
@@ -439,7 +439,7 @@ public:
bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
MetaspaceClosure::Ref* ref, bool read_only) {
address src_obj = ref->obj();
if (src_obj == NULL) {
if (src_obj == nullptr) {
return false;
}
ref->set_keep_after_pushing();
@@ -476,11 +476,11 @@ void ArchiveBuilder::add_special_ref(MetaspaceClosure::SpecialRef type, address
void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
MetaspaceClosure::Ref* ref) {
assert(ref->obj() != NULL, "should have checked");
assert(ref->obj() != nullptr, "should have checked");
if (enclosing_ref != NULL) {
if (enclosing_ref != nullptr) {
SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
if (src_info == NULL) {
if (src_info == nullptr) {
// source objects of point_to_it/set_to_null types are not copied
// so we don't need to remember their pointers.
} else {
@@ -641,7 +641,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
}
intptr_t* archived_vtable = CppVtables::get_archived_vtable(ref->msotype(), (address)dest);
if (archived_vtable != NULL) {
if (archived_vtable != nullptr) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
}
@@ -654,7 +654,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
address ArchiveBuilder::get_buffered_addr(address src_addr) const {
SourceObjInfo* p = _src_obj_table.get(src_addr);
assert(p != NULL, "must be");
assert(p != nullptr, "must be");
return p->buffered_addr();
}
@@ -662,7 +662,7 @@ address ArchiveBuilder::get_buffered_addr(address src_addr) const {
address ArchiveBuilder::get_source_addr(address buffered_addr) const {
assert(is_in_buffer_space(buffered_addr), "must be");
address* src_p = _buffered_to_src_table.get(buffered_addr);
assert(src_p != NULL && *src_p != NULL, "must be");
assert(src_p != nullptr && *src_p != nullptr, "must be");
return *src_p;
}
@@ -846,7 +846,7 @@ void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
// - Every pointer must have one of the following values:
// [a] NULL:
// [a] nullptr:
// No relocation is needed. Remove this pointer from ptrmap so we don't need to
// consider it at runtime.
// [b] Points into an object X which is inside the buffer:
@@ -886,7 +886,7 @@ class RelocateBufferToRequested : public BitMapClosure {
address* p = (address*)_buffer_bottom + offset;
assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
if (*p == NULL) {
if (*p == nullptr) {
// todo -- clear bit, etc
ArchivePtrMarker::ptrmap()->clear_bit(offset);
} else {
@@ -1064,7 +1064,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
size_t byte_size;
oop archived_oop = cast_to_oop(start);
oop original_oop = HeapShared::get_original_object(archived_oop);
if (original_oop != NULL) {
if (original_oop != nullptr) {
ResourceMark rm;
log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
p2i(to_requested(start)), original_oop->klass()->external_name());
@@ -1143,10 +1143,10 @@ public:
log_data((address)bitmap, bitmap_end, 0);
#if INCLUDE_CDS_JAVA_HEAP
if (closed_heap_regions != NULL) {
if (closed_heap_regions != nullptr) {
log_heap_regions("closed heap region", closed_heap_regions);
}
if (open_heap_regions != NULL) {
if (open_heap_regions != nullptr) {
log_heap_regions("open heap region", open_heap_regions);
}
#endif
@@ -1180,7 +1180,7 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_bitmaps, open_heap_bitmaps,
bitmap_size_in_bytes);
if (closed_heap_regions != NULL) {
if (closed_heap_regions != nullptr) {
_total_closed_heap_region_size = mapinfo->write_heap_regions(
closed_heap_regions,
closed_heap_bitmaps,
@@ -1239,7 +1239,7 @@ void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
print_bitmap_region_stats(bitmap_used, total_reserved);
if (closed_heap_regions != NULL) {
if (closed_heap_regions != nullptr) {
print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
print_heap_region_stats(open_heap_regions, "oa", total_reserved);
}
@@ -1255,7 +1255,7 @@ void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* regions,
const char *name, size_t total_size) {
int arr_len = regions == NULL ? 0 : regions->length();
int arr_len = regions == nullptr ? 0 : regions->length();
for (int i = 0; i < arr_len; i++) {
char* start = (char*)regions->at(i).start();
size_t size = regions->at(i).byte_size();

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -159,7 +159,7 @@ private:
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
} else {
_buffered_addr = NULL;
_buffered_addr = nullptr;
}
}
@@ -167,8 +167,8 @@ private:
MetaspaceClosure::Ref* ref() const { return _ref; }
void set_buffered_addr(address addr) {
assert(should_copy(), "must be");
assert(_buffered_addr == NULL, "cannot be copied twice");
assert(addr != NULL, "must be a valid copy");
assert(_buffered_addr == nullptr, "cannot be copied twice");
assert(addr != nullptr, "must be a valid copy");
_buffered_addr = addr;
}
void set_ptrmap_start(uintx v) { _ptrmap_start = v; }
@@ -422,12 +422,12 @@ public:
GrowableArray<Symbol*>* symbols() const { return _symbols; }
static bool is_active() {
return (_current != NULL);
return (_current != nullptr);
}
static ArchiveBuilder* current() {
assert_is_vm_thread();
assert(_current != NULL, "ArchiveBuilder must be active");
assert(_current != nullptr, "ArchiveBuilder must be active");
return _current;
}
@@ -447,7 +447,7 @@ public:
static Klass* get_buffered_klass(Klass* src_klass) {
Klass* klass = (Klass*)current()->get_buffered_addr((address)src_klass);
assert(klass != NULL && klass->is_klass(), "must be");
assert(klass != nullptr && klass->is_klass(), "must be");
return klass;
}

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -449,7 +449,7 @@ class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
virtual void do_oop(narrowOop* p) {
// This should be called before the loaded regions are modified, so all the embedded pointers
// must be NULL, or must point to a valid object in the loaded regions.
// must be null, or must point to a valid object in the loaded regions.
narrowOop v = *p;
if (!CompressedOops::is_null(v)) {
oop o = CompressedOops::decode_not_null(v);
@@ -540,7 +540,7 @@ void ArchiveHeapLoader::patch_native_pointers() {
for (int i = MetaspaceShared::first_archive_heap_region;
i <= MetaspaceShared::last_archive_heap_region; i++) {
FileMapRegion* r = FileMapInfo::current_info()->region_at(i);
if (r->mapped_base() != NULL && r->has_ptrmap()) {
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(cds, heap)("Patching native pointers in heap region %d", i);
BitMapView bm = r->ptrmap_view();
PatchNativePointers patcher((Metadata**)r->mapped_base());

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,10 +103,10 @@ public:
// than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl.
// To decode them, do not use CompressedOops::decode_not_null. Use this
// function instead.
inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
// More efficient version, but works only when ArchiveHeap is mapped.
inline static oop decode_from_mapped_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
inline static oop decode_from_mapped_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void patch_compressed_embedded_pointers(BitMapView bm,
FileMapInfo* info,
@@ -172,7 +172,7 @@ private:
}
template<bool IS_MAPPED>
inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
public:

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,13 @@
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
VirtualSpace* ArchivePtrMarker::_vs;
bool ArchivePtrMarker::_compacted;
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
assert(_ptrmap == NULL, "initialize only once");
assert(_ptrmap == nullptr, "initialize only once");
_vs = vs;
_compacted = false;
_ptrmap = ptrmap;
@@ -67,18 +67,18 @@ void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
}
void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(_ptrmap != nullptr, "not initialized");
assert(!_compacted, "cannot mark anymore");
if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
address value = *ptr_loc;
// We don't want any pointer that points to very bottom of the archive, otherwise when
// MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
// to nothing (NULL) vs a pointer to an objects that happens to be at the very bottom
// to nothing (null) vs a pointer to an objects that happens to be at the very bottom
// of the archive.
assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
if (value != NULL) {
if (value != nullptr) {
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
size_t idx = ptr_loc - ptr_base();
if (_ptrmap->size() <= idx) {
@@ -92,7 +92,7 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
}
void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
assert(_ptrmap != NULL, "not initialized");
assert(_ptrmap != nullptr, "not initialized");
assert(!_compacted, "cannot clear anymore");
assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
@@ -118,14 +118,14 @@ public:
bool do_bit(size_t offset) {
address* ptr_loc = _ptr_base + offset;
address ptr_value = *ptr_loc;
if (ptr_value != NULL) {
if (ptr_value != nullptr) {
assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
if (_max_non_null_offset < offset) {
_max_non_null_offset = offset;
}
} else {
_ptrmap->clear_bit(offset);
DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> NULL @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset));
DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> null @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset));
}
return true;
@@ -253,7 +253,7 @@ void DumpRegion::pack(DumpRegion* next) {
assert(!is_packed(), "sanity");
_end = (char*)align_up(_top, MetaspaceShared::core_region_alignment());
_is_packed = true;
if (next != NULL) {
if (next != nullptr) {
next->_rs = _rs;
next->_vs = _vs;
next->_base = next->_top = this->_end;
@@ -262,7 +262,7 @@ }
}
void WriteClosure::do_oop(oop* o) {
if (*o == NULL) {
if (*o == nullptr) {
_dump_region->append_intptr_t(0);
} else {
assert(HeapShared::can_write(), "sanity");
@@ -288,7 +288,7 @@ void WriteClosure::do_region(u_char* start, size_t size) {
}
void ReadClosure::do_ptr(void** p) {
assert(*p == NULL, "initializing previous initialized pointer.");
assert(*p == nullptr, "initializing previous initialized pointer.");
intptr_t obj = nextPtr();
assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
"hit tag while initializing ptrs.");
@@ -317,7 +317,7 @@ void ReadClosure::do_oop(oop *p) {
if (UseCompressedOops) {
narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
if (CompressedOops::is_null(o) || !ArchiveHeapLoader::is_fully_available()) {
*p = NULL;
*p = nullptr;
} else {
assert(ArchiveHeapLoader::can_use(), "sanity");
assert(ArchiveHeapLoader::is_fully_available(), "must be");
@@ -326,7 +326,7 @@ void ReadClosure::do_oop(oop *p) {
} else {
intptr_t dumptime_oop = nextPtr();
if (dumptime_oop == 0 || !ArchiveHeapLoader::is_fully_available()) {
*p = NULL;
*p = nullptr;
} else {
assert(!ArchiveHeapLoader::is_loaded(), "ArchiveHeapLoader::can_load() is not supported for uncompessed oops");
intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -142,7 +142,7 @@ private:
public:
DumpRegion(const char* name, uintx max_delta = 0)
: _name(name), _base(NULL), _top(NULL), _end(NULL),
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
_max_delta(max_delta), _is_packed(false) {}
char* expand_top_to(char* newtop);
@@ -157,7 +157,7 @@ public:
size_t used() const { return _top - _base; }
bool is_packed() const { return _is_packed; }
bool is_allocatable() const {
return !is_packed() && _base != NULL;
return !is_packed() && _base != nullptr;
}
void print(size_t total_bytes) const;
@@ -165,7 +165,7 @@ public:
void init(ReservedSpace* rs, VirtualSpace* vs);
void pack(DumpRegion* next = NULL);
void pack(DumpRegion* next = nullptr);
bool contains(char* p) {
return base() <= p && p < top();

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,10 +35,10 @@ inline bool SharedDataRelocator::do_bit(size_t offset) {
address old_ptr = *p;
assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
assert(old_ptr != NULL, "bits for NULL pointers should have been cleaned at dump time");
assert(old_ptr != nullptr, "bits for null pointers should have been cleaned at dump time");
address new_ptr = old_ptr + _delta;
assert(new_ptr != NULL, "don't point to the bottom of the archive"); // See ArchivePtrMarker::mark_pointer().
assert(new_ptr != nullptr, "don't point to the bottom of the archive"); // See ArchivePtrMarker::mark_pointer().
assert(_valid_new_base <= new_ptr && new_ptr < _valid_new_end, "must be");
DEBUG_ONLY(log_trace(cds, reloc)("Patch2: @%8d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,

View file

@@ -84,7 +84,7 @@
CDSHeapVerifier::CDSHeapVerifier() : _archived_objs(0), _problems(0)
{
# define ADD_EXCL(...) { static const char* e[] = {__VA_ARGS__, NULL}; add_exclusion(e); }
# define ADD_EXCL(...) { static const char* e[] = {__VA_ARGS__, nullptr}; add_exclusion(e); }
// Unfortunately this needs to be manually maintained. If
// test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchivedEnumTest.java fails,
@@ -166,10 +166,10 @@ public:
}
oop static_obj_field = _ik->java_mirror()->obj_field(fd->offset());
if (static_obj_field != NULL) {
if (static_obj_field != nullptr) {
Klass* klass = static_obj_field->klass();
if (_exclusions != NULL) {
for (const char** p = _exclusions; *p != NULL; p++) {
if (_exclusions != nullptr) {
for (const char** p = _exclusions; *p != nullptr; p++) {
if (fd->name()->equals(*p)) {
return;
}
@@ -228,7 +228,7 @@ inline bool CDSHeapVerifier::do_entry(oop& orig_obj, HeapShared::CachedOopInfo&
_archived_objs++;
StaticFieldInfo* info = _table.get(orig_obj);
if (info != NULL) {
if (info != nullptr) {
ResourceMark rm;
LogStream ls(Log(cds, heap)::warning());
ls.print_cr("Archive heap points to a static field that may be reinitialized at runtime:");
@@ -236,7 +236,7 @@ inline bool CDSHeapVerifier::do_entry(oop& orig_obj, HeapShared::CachedOopInfo&
ls.print("Value: ");
orig_obj->print_on(&ls);
ls.print_cr("--- trace begin ---");
trace_to_root(&ls, orig_obj, NULL, &value);
trace_to_root(&ls, orig_obj, nullptr, &value);
ls.print_cr("--- trace end ---");
ls.cr();
_problems ++;
@@ -267,8 +267,8 @@ public:
// Call this function (from gdb, etc) if you want to know why an object is archived.
void CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj) {
HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(orig_obj);
if (info != NULL) {
trace_to_root(st, orig_obj, NULL, info);
if (info != nullptr) {
trace_to_root(st, orig_obj, nullptr, info);
} else {
st->print_cr("Not an archived object??");
}
@@ -276,9 +276,9 @@ void CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj) {
int CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj, oop orig_field, HeapShared::CachedOopInfo* info) {
int level = 0;
if (info->_referrer != NULL) {
if (info->_referrer != nullptr) {
HeapShared::CachedOopInfo* ref = HeapShared::archived_object_cache()->get(info->_referrer);
assert(ref != NULL, "sanity");
assert(ref != nullptr, "sanity");
level = trace_to_root(st, info->_referrer, orig_obj, ref) + 1;
} else if (java_lang_String::is_instance(orig_obj)) {
st->print_cr("[%2d] (shared string table)", level++);
@@ -288,7 +288,7 @@ int CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj, oop orig_fiel
st->print("[%2d] ", level);
orig_obj->print_address_on(st);
st->print(" %s", k->internal_name());
if (orig_field != NULL) {
if (orig_field != nullptr) {
if (k->is_instance_klass()) {
TraceFields clo(orig_obj, orig_field, st);
InstanceKlass::cast(k)->do_nonstatic_fields(&clo);

View file

@@ -67,7 +67,7 @@ class CDSHeapVerifier : public KlassClosure {
return &excl[1];
}
}
return NULL;
return nullptr;
}
static int trace_to_root(outputStream* st, oop orig_obj, oop orig_field, HeapShared::CachedOopInfo* p);

View file

@@ -58,7 +58,7 @@ Handle CDSProtectionDomain::init_security_info(Handle class_loader, InstanceKlas
// for fast access by the VM.
// all packages from module image are already created during VM bootstrap in
// Modules::define_module().
assert(pkg_entry != NULL, "archived class in module image cannot be from unnamed package");
assert(pkg_entry != nullptr, "archived class in module image cannot be from unnamed package");
ModuleEntry* mod_entry = pkg_entry->module();
return get_shared_protection_domain(class_loader, mod_entry, THREAD);
} else {
@@ -83,17 +83,17 @@ Handle CDSProtectionDomain::init_security_info(Handle class_loader, InstanceKlas
// url = _shared_jar_urls[index];
// define_shared_package(class_name, class_loader, manifest, url, CHECK_NH);
//
// Note that if an element of these 3 _shared_xxx arrays is NULL, it will be initialized by
// Note that if an element of these 3 _shared_xxx arrays is null, it will be initialized by
// the corresponding SystemDictionaryShared::get_shared_xxx() function.
Handle manifest = get_shared_jar_manifest(index, CHECK_NH);
Handle url = get_shared_jar_url(index, CHECK_NH);
int index_offset = index - ClassLoaderExt::app_class_paths_start_index();
if (index_offset < PackageEntry::max_index_for_defined_in_class_path()) {
if (pkg_entry == NULL || !pkg_entry->is_defined_by_cds_in_class_path(index_offset)) {
if (pkg_entry == nullptr || !pkg_entry->is_defined_by_cds_in_class_path(index_offset)) {
// define_shared_package only needs to be called once for each package in a jar specified
// in the shared class path.
define_shared_package(class_name, class_loader, manifest, url, CHECK_NH);
if (pkg_entry != NULL) {
if (pkg_entry != nullptr) {
pkg_entry->set_defined_by_cds_in_class_path(index_offset);
}
}
@@ -108,7 +108,7 @@ Handle CDSProtectionDomain::get_package_name(Symbol* class_name, TRAPS) {
ResourceMark rm(THREAD);
Handle pkgname_string;
TempNewSymbol pkg = ClassLoader::package_from_class_name(class_name);
if (pkg != NULL) { // Package prefix found
if (pkg != nullptr) { // Package prefix found
const char* pkgname = pkg->as_klass_external_name();
pkgname_string = java_lang_String::create_from_str(pkgname,
CHECK_(pkgname_string));
@@ -118,17 +118,17 @@ Handle CDSProtectionDomain::get_package_name(Symbol* class_name, TRAPS) {
PackageEntry* CDSProtectionDomain::get_package_entry_from_class(InstanceKlass* ik, Handle class_loader) {
PackageEntry* pkg_entry = ik->package();
if (MetaspaceShared::use_full_module_graph() && ik->is_shared() && pkg_entry != NULL) {
if (MetaspaceShared::use_full_module_graph() && ik->is_shared() && pkg_entry != nullptr) {
assert(MetaspaceShared::is_in_shared_metaspace(pkg_entry), "must be");
assert(!ik->is_shared_unregistered_class(), "unexpected archived package entry for an unregistered class");
assert(ik->module()->is_named(), "unexpected archived package entry for a class in an unnamed module");
return pkg_entry;
}
TempNewSymbol pkg_name = ClassLoader::package_from_class_name(ik->name());
if (pkg_name != NULL) {
if (pkg_name != nullptr) {
pkg_entry = ClassLoaderData::class_loader_data(class_loader())->packages()->lookup_only(pkg_name);
} else {
pkg_entry = NULL;
pkg_entry = nullptr;
}
return pkg_entry;
}
@@ -142,7 +142,7 @@ void CDSProtectionDomain::define_shared_package(Symbol* class_name,
Handle url,
TRAPS) {
assert(SystemDictionary::is_system_class_loader(class_loader()), "unexpected class loader");
// get_package_name() returns a NULL handle if the class is in unnamed package
// get_package_name() returns a null handle if the class is in unnamed package
Handle pkgname_string = get_package_name(class_name, CHECK);
if (pkgname_string.not_null()) {
Klass* app_classLoader_klass = vmClasses::jdk_internal_loader_ClassLoaders_AppClassLoader_klass();
@@ -177,7 +177,7 @@ Handle CDSProtectionDomain::create_jar_manifest(const char* manifest_chars, size
Handle CDSProtectionDomain::get_shared_jar_manifest(int shared_path_index, TRAPS) {
Handle manifest;
if (shared_jar_manifest(shared_path_index) == NULL) {
if (shared_jar_manifest(shared_path_index) == nullptr) {
SharedClassPathEntry* ent = FileMapInfo::shared_path(shared_path_index);
size_t size = (size_t)ent->manifest_size();
if (size == 0) {
@@ -186,7 +186,7 @@ Handle CDSProtectionDomain::get_shared_jar_manifest(int shared_path_index, TRAPS
// ByteArrayInputStream bais = new ByteArrayInputStream(buf);
const char* src = ent->manifest();
assert(src != NULL, "No Manifest data");
assert(src != nullptr, "No Manifest data");
manifest = create_jar_manifest(src, size, CHECK_NH);
atomic_set_shared_jar_manifest(shared_path_index, manifest());
}
@@ -197,7 +197,7 @@ Handle CDSProtectionDomain::get_shared_jar_manifest(int shared_path_index, TRAPS
Handle CDSProtectionDomain::get_shared_jar_url(int shared_path_index, TRAPS) {
Handle url_h;
if (shared_jar_url(shared_path_index) == NULL) {
if (shared_jar_url(shared_path_index) == nullptr) {
JavaValue result(T_OBJECT);
const char* path = FileMapInfo::shared_path_name(shared_path_index);
Handle path_string = java_lang_String::create_from_str(path, CHECK_(url_h));
@@ -240,7 +240,7 @@ Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
Handle url,
TRAPS) {
Handle protection_domain;
if (shared_protection_domain(shared_path_index) == NULL) {
if (shared_protection_domain(shared_path_index) == nullptr) {
Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
atomic_set_shared_protection_domain(shared_path_index, pd());
}
@@ -257,9 +257,9 @@ Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
ModuleEntry* mod, TRAPS) {
ClassLoaderData *loader_data = mod->loader_data();
if (mod->shared_protection_domain() == NULL) {
if (mod->shared_protection_domain() == nullptr) {
Symbol* location = mod->location();
if (location != NULL) {
if (location != nullptr) {
Handle location_string = java_lang_String::create_from_symbol(
location, CHECK_NH);
Handle url;
@@ -301,7 +301,7 @@ oop CDSProtectionDomain::shared_protection_domain(int index) {
}
void CDSProtectionDomain::allocate_shared_protection_domain_array(int size, TRAPS) {
if (_shared_protection_domains.resolve() == NULL) {
if (_shared_protection_domains.resolve() == nullptr) {
oop spd = oopFactory::new_objArray(
vmClasses::ProtectionDomain_klass(), size, CHECK);
_shared_protection_domains = OopHandle(Universe::vm_global(), spd);
@@ -313,7 +313,7 @@ oop CDSProtectionDomain::shared_jar_url(int index) {
}
void CDSProtectionDomain::allocate_shared_jar_url_array(int size, TRAPS) {
if (_shared_jar_urls.resolve() == NULL) {
if (_shared_jar_urls.resolve() == nullptr) {
oop sju = oopFactory::new_objArray(
vmClasses::URL_klass(), size, CHECK);
_shared_jar_urls = OopHandle(Universe::vm_global(), sju);
@@ -325,7 +325,7 @@ oop CDSProtectionDomain::shared_jar_manifest(int index) {
}
void CDSProtectionDomain::allocate_shared_jar_manifest_array(int size, TRAPS) {
if (_shared_jar_manifests.resolve() == NULL) {
if (_shared_jar_manifests.resolve() == nullptr) {
oop sjm = oopFactory::new_objArray(
vmClasses::Jar_Manifest_klass(), size, CHECK);
_shared_jar_manifests = OopHandle(Universe::vm_global(), sjm);

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
"Address to allocate shared memory region for class data") \
range(0, SIZE_MAX) \
\
product(ccstr, SharedArchiveConfigFile, NULL, \
product(ccstr, SharedArchiveConfigFile, nullptr, \
"Data to add to the CDS archive file") \
\
product(uint, SharedSymbolTableBucketSize, 4, \
@@ -67,25 +67,25 @@
product(bool, AllowArchivingWithJavaAgent, false, DIAGNOSTIC, \
"Allow Java agent to be run with CDS dumping") \
\
develop(ccstr, ArchiveHeapTestClass, NULL, \
develop(ccstr, ArchiveHeapTestClass, nullptr, \
"For JVM internal testing only. The static field named " \
"\"archivedObjects\" of the specified class is stored in the " \
"CDS archive heap") \
\
product(ccstr, DumpLoadedClassList, NULL, \
product(ccstr, DumpLoadedClassList, nullptr, \
"Dump the names all loaded classes, that could be stored into " \
"the CDS archive, in the specified file") \
\
product(ccstr, SharedClassListFile, NULL, \
product(ccstr, SharedClassListFile, nullptr, \
"Override the default CDS class list") \
\
product(ccstr, SharedArchiveFile, NULL, \
product(ccstr, SharedArchiveFile, nullptr, \
"Override the default location of the CDS archive file") \
\
product(ccstr, ArchiveClassesAtExit, NULL, \
product(ccstr, ArchiveClassesAtExit, nullptr, \
"The path and name of the dynamic archive file") \
\
product(ccstr, ExtraSharedClassListFile, NULL, \
product(ccstr, ExtraSharedClassListFile, nullptr, \
"Extra classlist for building the CDS archive file") \
\
product(int, ArchiveRelocationMode, 0, DIAGNOSTIC, \

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,14 +51,14 @@
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
volatile Thread* ClassListParser::_parsing_thread = NULL;
ClassListParser* ClassListParser::_instance = NULL;
volatile Thread* ClassListParser::_parsing_thread = nullptr;
ClassListParser* ClassListParser::_instance = nullptr;
ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2klass_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE) {
log_info(cds)("Parsing %s%s", file,
(parse_mode == _parse_lambda_forms_invokers_only) ? " (lambda form invokers only)" : "");
_classlist_file = file;
_file = NULL;
_file = nullptr;
// Use os::open() because neither fopen() nor os::fopen()
// can handle long path name on Windows.
int fd = os::open(file, O_RDONLY, S_IREAD);
@ -67,7 +67,7 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k
// can be used in parse_one_line()
_file = os::fdopen(fd, "r");
}
if (_file == NULL) {
if (_file == nullptr) {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
vm_exit_during_initialization("Loading classlist failed", errmsg);
@ -78,7 +78,7 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k
_parse_mode = parse_mode;
// _instance should only be accessed by the thread that created _instance.
assert(_instance == NULL, "must be singleton");
assert(_instance == nullptr, "must be singleton");
_instance = this;
Atomic::store(&_parsing_thread, Thread::current());
}
@ -88,13 +88,13 @@ bool ClassListParser::is_parsing_thread() {
}
ClassListParser::~ClassListParser() {
if (_file != NULL) {
if (_file != nullptr) {
fclose(_file);
}
Atomic::store(&_parsing_thread, (Thread*)NULL);
Atomic::store(&_parsing_thread, (Thread*)nullptr);
delete _indy_items;
delete _interfaces;
_instance = NULL;
_instance = nullptr;
}
int ClassListParser::parse(TRAPS) {
@ -130,7 +130,7 @@ int ClassListParser::parse(TRAPS) {
ResourceMark rm(THREAD);
char* ex_msg = (char*)"";
oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
if (message != NULL) {
if (message != nullptr) {
ex_msg = java_lang_String::as_utf8_string(message);
}
log_warning(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(), ex_msg);
@ -141,7 +141,7 @@ int ClassListParser::parse(TRAPS) {
continue;
}
assert(klass != NULL, "sanity");
assert(klass != nullptr, "sanity");
if (log_is_enabled(Trace, cds)) {
ResourceMark rm(THREAD);
log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
@ -165,7 +165,7 @@ int ClassListParser::parse(TRAPS) {
bool ClassListParser::parse_one_line() {
for (;;) {
if (fgets(_line, sizeof(_line), _file) == NULL) {
if (fgets(_line, sizeof(_line), _file) == nullptr) {
return false;
}
++ _line_no;
@ -207,7 +207,7 @@ bool ClassListParser::parse_one_line() {
_id = _unspecified;
_super = _unspecified;
_interfaces->clear();
_source = NULL;
_source = nullptr;
_interfaces_specified = false;
_indy_items->clear();
_lambda_form_line = false;
@ -216,7 +216,7 @@ bool ClassListParser::parse_one_line() {
return parse_at_tags();
}
if ((_token = strchr(_line, ' ')) == NULL) {
if ((_token = strchr(_line, ' ')) == nullptr) {
// No optional arguments are specified.
return true;
}
@ -242,7 +242,7 @@ bool ClassListParser::parse_one_line() {
skip_whitespaces();
_source = _token;
char* s = strchr(_token, ' ');
if (s == NULL) {
if (s == nullptr) {
break; // end of input line
} else {
*s = '\0'; // mark the end of _source
@ -284,7 +284,7 @@ void ClassListParser::split_tokens_by_whitespace(int offset) {
int ClassListParser::split_at_tag_from_line() {
_token = _line;
char* ptr;
if ((ptr = strchr(_line, ' ')) == NULL) {
if ((ptr = strchr(_line, ' ')) == nullptr) {
error("Too few items following the @ tag \"%s\" line #%d", _line, _line_no);
return 0;
}
@ -445,7 +445,7 @@ void ClassListParser::error(const char* msg, ...) {
jio_fprintf(defaultStream::error_stream(), "^\n");
}
vm_exit_during_initialization("class list format error.", NULL);
vm_exit_during_initialization("class list format error.", nullptr);
va_end(ap);
}
@ -544,7 +544,7 @@ void ClassListParser::resolve_indy(JavaThread* current, Symbol* class_name_symbo
ResourceMark rm(current);
char* ex_msg = (char*)"";
oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
if (message != NULL) {
if (message != nullptr) {
ex_msg = java_lang_String::as_utf8_string(message);
}
log_warning(cds)("resolve_indy for class %s has encountered exception: %s %s",
@ -640,7 +640,7 @@ Klass* ClassListParser::load_current_class(Symbol* class_name_symbol, TRAPS) {
assert(result.get_type() == T_OBJECT, "just checking");
oop obj = result.get_oop();
assert(obj != NULL, "jdk.internal.loader.BuiltinClassLoader::loadClass never returns null");
assert(obj != nullptr, "jdk.internal.loader.BuiltinClassLoader::loadClass never returns null");
klass = java_lang_Class::as_Klass(obj);
} else {
// If "source:" tag is specified, all super class and super interfaces must be specified in the
@ -648,7 +648,7 @@ Klass* ClassListParser::load_current_class(Symbol* class_name_symbol, TRAPS) {
klass = load_class_from_source(class_name_symbol, CHECK_NULL);
}
assert(klass != NULL, "exception should have been thrown");
assert(klass != nullptr, "exception should have been thrown");
assert(klass->is_instance_klass(), "array classes should have been filtered out");
if (is_id_specified()) {
@ -669,22 +669,22 @@ Klass* ClassListParser::load_current_class(Symbol* class_name_symbol, TRAPS) {
}
bool ClassListParser::is_loading_from_source() {
return (_source != NULL);
return (_source != nullptr);
}
InstanceKlass* ClassListParser::lookup_class_by_id(int id) {
InstanceKlass** klass_ptr = id2klass_table()->get(id);
if (klass_ptr == NULL) {
if (klass_ptr == nullptr) {
error("Class ID %d has not been defined", id);
}
assert(*klass_ptr != NULL, "must be");
assert(*klass_ptr != nullptr, "must be");
return *klass_ptr;
}
InstanceKlass* ClassListParser::lookup_super_for_current_class(Symbol* super_name) {
if (!is_loading_from_source()) {
return NULL;
return nullptr;
}
InstanceKlass* k = lookup_class_by_id(super());
@ -698,7 +698,7 @@ InstanceKlass* ClassListParser::lookup_super_for_current_class(Symbol* super_nam
InstanceKlass* ClassListParser::lookup_interface_for_current_class(Symbol* interface_name) {
if (!is_loading_from_source()) {
return NULL;
return nullptr;
}
const int n = _interfaces->length();
@ -721,5 +721,5 @@ InstanceKlass* ClassListParser::lookup_interface_for_current_class(Symbol* inter
error("The interface %s implemented by class %s does not match any of the specified interface IDs",
interface_name->as_klass_external_name(), _class_name);
ShouldNotReachHere();
return NULL;
return nullptr;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,12 +39,12 @@ class Thread;
class CDSIndyInfo {
GrowableArray<const char*>* _items;
public:
CDSIndyInfo() : _items(NULL) {}
CDSIndyInfo() : _items(nullptr) {}
void add_item(const char* item) {
if (_items == NULL) {
if (_items == nullptr) {
_items = new GrowableArray<const char*>(9);
}
assert(_items != NULL, "sanity");
assert(_items != nullptr, "sanity");
_items->append(item);
}
void add_ref_kind(int ref_kind) {
@ -144,7 +144,7 @@ public:
static bool is_parsing_thread();
static ClassListParser* instance() {
assert(is_parsing_thread(), "call this only in the thread that created ClassListParsing::_instance");
assert(_instance != NULL, "must be");
assert(_instance != nullptr, "must be");
return _instance;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,12 +34,12 @@
#include "oops/instanceKlass.hpp"
#include "runtime/mutexLocker.hpp"
fileStream* ClassListWriter::_classlist_file = NULL;
fileStream* ClassListWriter::_classlist_file = nullptr;
void ClassListWriter::init() {
// For -XX:DumpLoadedClassList=<file> option
if (DumpLoadedClassList != NULL) {
const char* list_name = make_log_name(DumpLoadedClassList, NULL);
if (DumpLoadedClassList != nullptr) {
const char* list_name = make_log_name(DumpLoadedClassList, nullptr);
_classlist_file = new(mtInternal)
fileStream(list_name);
_classlist_file->print_cr("# NOTE: Do not modify this file.");
@ -56,7 +56,7 @@ void ClassListWriter::write(const InstanceKlass* k, const ClassFileStream* cfs)
if (!ClassLoader::has_jrt_entry()) {
warning("DumpLoadedClassList and CDS are not supported in exploded build");
DumpLoadedClassList = NULL;
DumpLoadedClassList = nullptr;
return;
}
@ -69,12 +69,12 @@ class ClassListWriter::IDTable : public ResourceHashtable<
15889, // prime number
AnyObj::C_HEAP> {};
ClassListWriter::IDTable* ClassListWriter::_id_table = NULL;
ClassListWriter::IDTable* ClassListWriter::_id_table = nullptr;
int ClassListWriter::_total_ids = 0;
int ClassListWriter::get_id(const InstanceKlass* k) {
assert_locked();
if (_id_table == NULL) {
if (_id_table == nullptr) {
_id_table = new (mtClass)IDTable();
}
bool created;
@ -87,8 +87,8 @@ int ClassListWriter::get_id(const InstanceKlass* k) {
bool ClassListWriter::has_id(const InstanceKlass* k) {
assert_locked();
if (_id_table != NULL) {
return _id_table->get(k) != NULL;
if (_id_table != nullptr) {
return _id_table->get(k) != nullptr;
} else {
return false;
}
@ -96,7 +96,7 @@ bool ClassListWriter::has_id(const InstanceKlass* k) {
void ClassListWriter::handle_class_unloading(const InstanceKlass* klass) {
assert_locked();
if (_id_table != NULL) {
if (_id_table != nullptr) {
_id_table->remove(klass);
}
}
@ -132,7 +132,7 @@ void ClassListWriter::write_to_stream(const InstanceKlass* k, outputStream* stre
{
InstanceKlass* super = k->java_super();
if (super != NULL && !has_id(super)) {
if (super != nullptr && !has_id(super)) {
return;
}
@ -158,7 +158,7 @@ void ClassListWriter::write_to_stream(const InstanceKlass* k, outputStream* stre
stream->print("%s id: %d", k->name()->as_C_string(), get_id(k));
if (!is_builtin_loader) {
InstanceKlass* super = k->java_super();
assert(super != NULL, "must be");
assert(super != nullptr, "must be");
stream->print(" super: %d", get_id(super));
Array<InstanceKlass*>* interfaces = k->local_interfaces();
@ -185,7 +185,7 @@ void ClassListWriter::write_to_stream(const InstanceKlass* k, outputStream* stre
}
void ClassListWriter::delete_classlist() {
if (_classlist_file != NULL) {
if (_classlist_file != nullptr) {
delete _classlist_file;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ public:
void handle_class_unloading(const InstanceKlass* klass);
static bool is_enabled() {
return _classlist_file != NULL && _classlist_file->is_open();
return _classlist_file != nullptr && _classlist_file->is_open();
}
#else
@ -65,7 +65,7 @@ public:
static void init() NOT_CDS_RETURN;
static void write(const InstanceKlass* k, const ClassFileStream* cfs) NOT_CDS_RETURN;
static void write_to_stream(const InstanceKlass* k, outputStream* stream, const ClassFileStream* cfs = NULL) NOT_CDS_RETURN;
static void write_to_stream(const InstanceKlass* k, outputStream* stream, const ClassFileStream* cfs = nullptr) NOT_CDS_RETURN;
static void delete_classlist() NOT_CDS_RETURN;
};

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,11 +33,11 @@
#include "oops/klass.inline.hpp"
#include "runtime/handles.inline.hpp"
ClassPrelinker::ClassesTable* ClassPrelinker::_processed_classes = NULL;
ClassPrelinker::ClassesTable* ClassPrelinker::_vm_classes = NULL;
ClassPrelinker::ClassesTable* ClassPrelinker::_processed_classes = nullptr;
ClassPrelinker::ClassesTable* ClassPrelinker::_vm_classes = nullptr;
bool ClassPrelinker::is_vm_class(InstanceKlass* ik) {
return (_vm_classes->get(ik) != NULL);
return (_vm_classes->get(ik) != nullptr);
}
void ClassPrelinker::add_one_vm_class(InstanceKlass* ik) {
@ -45,7 +45,7 @@ void ClassPrelinker::add_one_vm_class(InstanceKlass* ik) {
_vm_classes->put_if_absent(ik, &created);
if (created) {
InstanceKlass* super = ik->java_super();
if (super != NULL) {
if (super != nullptr) {
add_one_vm_class(super);
}
Array<InstanceKlass*>* ifs = ik->local_interfaces();
@ -56,7 +56,7 @@ void ClassPrelinker::add_one_vm_class(InstanceKlass* ik) {
}
void ClassPrelinker::initialize() {
assert(_vm_classes == NULL, "must be");
assert(_vm_classes == nullptr, "must be");
_vm_classes = new (mtClass)ClassesTable();
_processed_classes = new (mtClass)ClassesTable();
for (auto id : EnumRange<vmClassID>{}) {
@ -65,11 +65,11 @@ void ClassPrelinker::initialize() {
}
void ClassPrelinker::dispose() {
assert(_vm_classes != NULL, "must be");
assert(_vm_classes != nullptr, "must be");
delete _vm_classes;
delete _processed_classes;
_vm_classes = NULL;
_processed_classes = NULL;
_vm_classes = nullptr;
_processed_classes = nullptr;
}
bool ClassPrelinker::can_archive_resolved_klass(ConstantPool* cp, int cp_index) {
@ -77,7 +77,7 @@ bool ClassPrelinker::can_archive_resolved_klass(ConstantPool* cp, int cp_index)
assert(cp->tag_at(cp_index).is_klass(), "must be resolved");
Klass* resolved_klass = cp->resolved_klass_at(cp_index);
assert(resolved_klass != NULL, "must be");
assert(resolved_klass != nullptr, "must be");
return can_archive_resolved_klass(cp->pool_holder(), resolved_klass);
}
@ -115,8 +115,8 @@ bool ClassPrelinker::can_archive_resolved_klass(InstanceKlass* cp_holder, Klass*
void ClassPrelinker::dumptime_resolve_constants(InstanceKlass* ik, TRAPS) {
constantPoolHandle cp(THREAD, ik->constants());
if (cp->cache() == NULL || cp->reference_map() == NULL) {
// The cache may be NULL if the pool_holder klass fails verification
if (cp->cache() == nullptr || cp->reference_map() == nullptr) {
// The cache may be null if the pool_holder klass fails verification
// at dump time due to missing dependencies.
return;
}
@ -146,16 +146,16 @@ Klass* ClassPrelinker::find_loaded_class(JavaThread* THREAD, oop class_loader, S
Klass* k = SystemDictionary::find_instance_or_array_klass(THREAD, name,
h_loader,
Handle());
if (k != NULL) {
if (k != nullptr) {
return k;
}
if (class_loader == SystemDictionary::java_system_loader()) {
return find_loaded_class(THREAD, SystemDictionary::java_platform_loader(), name);
} else if (class_loader == SystemDictionary::java_platform_loader()) {
return find_loaded_class(THREAD, NULL, name);
return find_loaded_class(THREAD, nullptr, name);
}
return NULL;
return nullptr;
}
Klass* ClassPrelinker::maybe_resolve_class(constantPoolHandle cp, int cp_index, TRAPS) {
@ -166,12 +166,12 @@ Klass* ClassPrelinker::maybe_resolve_class(constantPoolHandle cp, int cp_index,
!cp_holder->is_shared_app_class()) {
// Don't trust custom loaders, as they may not be well-behaved
// when resolving classes.
return NULL;
return nullptr;
}
Symbol* name = cp->klass_name_at(cp_index);
Klass* resolved_klass = find_loaded_class(THREAD, cp_holder->class_loader(), name);
if (resolved_klass != NULL && can_archive_resolved_klass(cp_holder, resolved_klass)) {
if (resolved_klass != nullptr && can_archive_resolved_klass(cp_holder, resolved_klass)) {
Klass* k = cp->klass_at(cp_index, CHECK_NULL); // Should fail only with OOM
assert(k == resolved_klass, "must be");
}
@ -193,7 +193,7 @@ void ClassPrelinker::resolve_string(constantPoolHandle cp, int cp_index, TRAPS)
#ifdef ASSERT
bool ClassPrelinker::is_in_archivebuilder_buffer(address p) {
if (!Thread::current()->is_VM_thread() || ArchiveBuilder::current() == NULL) {
if (!Thread::current()->is_VM_thread() || ArchiveBuilder::current() == nullptr) {
return false;
} else {
return ArchiveBuilder::current()->is_in_buffer_space(p);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,7 +126,7 @@ void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
// trick by declaring 2 subclasses:
//
// class CppVtableTesterA: public InstanceKlass {virtual int last_virtual_method() {return 1;} };
// class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return NULL}; };
// class CppVtableTesterB: public InstanceKlass {virtual void* last_virtual_method() {return nullptr}; };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
@ -149,7 +149,7 @@ public:
virtual void* last_virtual_method() {
// Make this different than CppVtableTesterB::last_virtual_method so the C++
// compiler/linker won't alias the two functions.
return NULL;
return nullptr;
}
};
@ -212,7 +212,7 @@ void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
// the following holds true:
// _index[ConstantPool_Kind]->cloned_vtable() == ((intptr_t**)cp)[0]
// _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0]
CppVtableInfo** CppVtables::_index = NULL;
CppVtableInfo** CppVtables::_index = nullptr;
char* CppVtables::dumptime_init(ArchiveBuilder* builder) {
assert(DumpSharedSpaces, "must");
@ -277,7 +277,7 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
assert(kind < _num_cloned_vtable_kinds, "must be");
return _index[kind]->cloned_vtable();
} else {
return NULL;
return nullptr;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ public:
SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,12 +46,12 @@ DumpTimeClassInfo::DumpTimeClassInfo(const DumpTimeClassInfo& src) {
_clsfile_crc32 = src._clsfile_crc32;
_excluded = src._excluded;
_is_early_klass = src._is_early_klass;
_verifier_constraints = NULL;
_verifier_constraint_flags = NULL;
_loader_constraints = NULL;
_verifier_constraints = nullptr;
_verifier_constraint_flags = nullptr;
_loader_constraints = nullptr;
assert(src._enum_klass_static_fields == NULL, "This should not happen with dynamic dump.");
_enum_klass_static_fields = NULL;
assert(src._enum_klass_static_fields == nullptr, "This should not happen with dynamic dump.");
_enum_klass_static_fields = nullptr;
{
int n = src.num_verifier_constraints();
@ -77,12 +77,12 @@ DumpTimeClassInfo::DumpTimeClassInfo(const DumpTimeClassInfo& src) {
}
DumpTimeClassInfo::~DumpTimeClassInfo() {
if (_verifier_constraints != NULL) {
assert(_verifier_constraint_flags != NULL, "must be");
if (_verifier_constraints != nullptr) {
assert(_verifier_constraint_flags != nullptr, "must be");
delete _verifier_constraints;
delete _verifier_constraint_flags;
}
if (_loader_constraints != NULL) {
if (_loader_constraints != nullptr) {
delete _loader_constraints;
}
}
@ -95,10 +95,10 @@ size_t DumpTimeClassInfo::runtime_info_bytesize() const {
void DumpTimeClassInfo::add_verification_constraint(InstanceKlass* k, Symbol* name,
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
if (_verifier_constraints == NULL) {
if (_verifier_constraints == nullptr) {
_verifier_constraints = new (mtClass) GrowableArray<DTVerifierConstraint>(4, mtClass);
}
if (_verifier_constraint_flags == NULL) {
if (_verifier_constraint_flags == nullptr) {
_verifier_constraint_flags = new (mtClass) GrowableArray<char>(4, mtClass);
}
GrowableArray<DTVerifierConstraint>* vc_array = _verifier_constraints;
@ -140,7 +140,7 @@ static char get_loader_type_by(oop loader) {
void DumpTimeClassInfo::record_linking_constraint(Symbol* name, Handle loader1, Handle loader2) {
assert(loader1 != loader2, "sanity");
LogTarget(Info, class, loader, constraints) log;
if (_loader_constraints == NULL) {
if (_loader_constraints == nullptr) {
_loader_constraints = new (mtClass) GrowableArray<DTLoaderConstraint>(4, mtClass);
}
char lt1 = get_loader_type_by(loader1());
@ -172,14 +172,14 @@ void DumpTimeClassInfo::record_linking_constraint(Symbol* name, Handle loader1,
}
void DumpTimeClassInfo::add_enum_klass_static_field(int archived_heap_root_index) {
if (_enum_klass_static_fields == NULL) {
if (_enum_klass_static_fields == nullptr) {
_enum_klass_static_fields = new (mtClass) GrowableArray<int>(20, mtClass);
}
_enum_klass_static_fields->append(archived_heap_root_index);
}
int DumpTimeClassInfo::enum_klass_static_field(int which_field) {
assert(_enum_klass_static_fields != NULL, "must be");
assert(_enum_klass_static_fields != nullptr, "must be");
return _enum_klass_static_fields->at(which_field);
}
@ -199,7 +199,7 @@ DumpTimeClassInfo* DumpTimeSharedClassTable::allocate_info(InstanceKlass* k) {
DumpTimeClassInfo* DumpTimeSharedClassTable::get_info(InstanceKlass* k) {
assert(!k->is_shared(), "Do not call with shared classes");
DumpTimeClassInfo* p = get(k);
assert(p != NULL, "we must not see any non-shared InstanceKlass* that's "
assert(p != nullptr, "we must not see any non-shared InstanceKlass* that's "
"not stored with SystemDictionaryShared::init_dumptime_info");
assert(p->_klass == k, "Sanity");
return p;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ class DumpTimeClassInfo: public CHeapObj<mtClass> {
char _loader_type1;
char _loader_type2;
public:
DTLoaderConstraint() : _name(NULL), _loader_type1('0'), _loader_type2('0') {}
DTLoaderConstraint() : _name(nullptr), _loader_type1('0'), _loader_type2('0') {}
DTLoaderConstraint(Symbol* name, char l1, char l2) : _name(name), _loader_type1(l1), _loader_type2(l2) {
Symbol::maybe_increment_refcount(_name);
}
@ -84,7 +84,7 @@ class DumpTimeClassInfo: public CHeapObj<mtClass> {
Symbol* _name;
Symbol* _from_name;
public:
DTVerifierConstraint() : _name(NULL), _from_name(NULL) {}
DTVerifierConstraint() : _name(nullptr), _from_name(nullptr) {}
DTVerifierConstraint(Symbol* n, Symbol* fn) : _name(n), _from_name(fn) {
Symbol::maybe_increment_refcount(_name);
Symbol::maybe_increment_refcount(_from_name);
@ -130,8 +130,8 @@ public:
GrowableArray<int>* _enum_klass_static_fields;
DumpTimeClassInfo() {
_klass = NULL;
_nest_host = NULL;
_klass = nullptr;
_nest_host = nullptr;
_failed_verification = false;
_is_archived_lambda_proxy = false;
_has_checked_exclusion = false;
@ -140,10 +140,10 @@ public:
_clsfile_crc32 = -1;
_excluded = false;
_is_early_klass = JvmtiExport::is_early_phase();
_verifier_constraints = NULL;
_verifier_constraint_flags = NULL;
_loader_constraints = NULL;
_enum_klass_static_fields = NULL;
_verifier_constraints = nullptr;
_verifier_constraint_flags = nullptr;
_loader_constraints = nullptr;
_enum_klass_static_fields = nullptr;
}
DumpTimeClassInfo(const DumpTimeClassInfo& src);
DumpTimeClassInfo& operator=(const DumpTimeClassInfo&) = delete;
@ -159,7 +159,7 @@ public:
private:
template <typename T>
static int array_length_or_zero(GrowableArray<T>* array) {
if (array == NULL) {
if (array == nullptr) {
return 0;
} else {
return array->length();
@ -183,12 +183,12 @@ public:
void metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_klass);
it->push(&_nest_host);
if (_verifier_constraints != NULL) {
if (_verifier_constraints != nullptr) {
for (int i = 0; i < _verifier_constraints->length(); i++) {
_verifier_constraints->adr_at(i)->metaspace_pointers_do(it);
}
}
if (_loader_constraints != NULL) {
if (_loader_constraints != nullptr) {
for (int i = 0; i < _loader_constraints->length(); i++) {
_loader_constraints->adr_at(i)->metaspace_pointers_do(it);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -201,10 +201,10 @@ void DynamicArchiveBuilder::release_header() {
// bad will happen.
assert(SafepointSynchronize::is_at_safepoint(), "must be");
FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
assert(mapinfo != nullptr && _header == mapinfo->dynamic_header(), "must be");
delete mapinfo;
assert(!DynamicArchive::is_mapped(), "must be");
_header = NULL;
_header = nullptr;
}
void DynamicArchiveBuilder::post_dump() {
@ -225,14 +225,14 @@ void DynamicArchiveBuilder::sort_methods() {
// The address order of the copied Symbols may be different than when the original
// klasses were created. Re-sort all the tables. See Method::sort_methods().
void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
assert(ik != nullptr, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
if (MetaspaceShared::is_in_shared_metaspace(ik)) {
// We have reached a supertype that's already in the base archive
return;
}
if (ik->java_mirror() == NULL) {
// NULL mirror means this class has already been visited and methods are already sorted
if (ik->java_mirror() == nullptr) {
// null mirror means this class has already been visited and methods are already sorted
return;
}
ik->remove_java_mirror();
@ -257,13 +257,13 @@ void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
}
#ifdef ASSERT
if (ik->methods() != NULL) {
if (ik->methods() != nullptr) {
for (int m = 0; m < ik->methods()->length(); m++) {
Symbol* name = ik->methods()->at(m)->name();
assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
}
}
if (ik->default_methods() != NULL) {
if (ik->default_methods() != nullptr) {
for (int m = 0; m < ik->default_methods()->length(); m++) {
Symbol* name = ik->default_methods()->at(m)->name();
assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
@ -272,7 +272,7 @@ void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
#endif
Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
if (ik->default_methods() != NULL) {
if (ik->default_methods() != nullptr) {
Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
}
if (ik->is_linked()) {
@ -320,10 +320,10 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
_header->set_serialized_data(serialized_data);
FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
assert(dynamic_info != NULL, "Sanity");
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_for_write();
ArchiveBuilder::write_archive(dynamic_info, NULL, NULL, NULL, NULL);
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;
address top = _requested_dynamic_archive_top;
@ -367,7 +367,7 @@ void DynamicArchive::check_for_dynamic_dump() {
#define __THEMSG " is unsupported when base CDS archive is not loaded. Run with -Xlog:cds for more info."
if (RecordDynamicDumpInfo) {
vm_exit_during_initialization("-XX:+RecordDynamicDumpInfo" __THEMSG, NULL);
vm_exit_during_initialization("-XX:+RecordDynamicDumpInfo" __THEMSG, nullptr);
} else {
assert(ArchiveClassesAtExit != nullptr, "sanity");
warning("-XX:ArchiveClassesAtExit" __THEMSG);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ public:
static void prepare_for_dump_at_exit();
static void dump_for_jcmd(const char* archive_name, TRAPS);
static void dump(const char* archive_name, TRAPS);
static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; }
static bool is_mapped() { return FileMapInfo::dynamic_info() != nullptr; }
static bool validate(FileMapInfo* dynamic_info);
};
#endif // INCLUDE_CDS

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,7 +91,7 @@ static void fail_exit(const char *msg, va_list ap) {
jio_vfprintf(defaultStream::error_stream(), msg, ap);
jio_fprintf(defaultStream::error_stream(), "\n");
// Do not change the text of the below message because some tests check for it.
vm_exit_during_initialization("Unable to use shared archive.", NULL);
vm_exit_during_initialization("Unable to use shared archive.", nullptr);
}
@ -180,10 +180,10 @@ FileMapInfo::FileMapInfo(const char* full_path, bool is_static) :
_is_static(is_static), _file_open(false), _is_mapped(false), _fd(-1), _file_offset(0),
_full_path(full_path), _base_archive_name(nullptr), _header(nullptr) {
if (_is_static) {
assert(_current_info == NULL, "must be singleton"); // not thread safe
assert(_current_info == nullptr, "must be singleton"); // not thread safe
_current_info = this;
} else {
assert(_dynamic_archive_info == NULL, "must be singleton"); // not thread safe
assert(_dynamic_archive_info == nullptr, "must be singleton"); // not thread safe
_dynamic_archive_info = this;
}
}
@ -191,10 +191,10 @@ FileMapInfo::FileMapInfo(const char* full_path, bool is_static) :
FileMapInfo::~FileMapInfo() {
if (_is_static) {
assert(_current_info == this, "must be singleton"); // not thread safe
_current_info = NULL;
_current_info = nullptr;
} else {
assert(_dynamic_archive_info == this, "must be singleton"); // not thread safe
_dynamic_archive_info = NULL;
_dynamic_archive_info = nullptr;
}
if (_header != nullptr) {
@ -207,7 +207,7 @@ FileMapInfo::~FileMapInfo() {
}
void FileMapInfo::populate_header(size_t core_region_alignment) {
assert(_header == NULL, "Sanity check");
assert(_header == nullptr, "Sanity check");
size_t c_header_size;
size_t header_size;
size_t base_archive_name_size = 0;
@ -428,7 +428,7 @@ void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData*
_from_class_path_attr = ent->_from_class_path_attr;
set_name(ent->name(), CHECK);
if (ent->is_jar() && !ent->is_signed() && ent->manifest() != NULL) {
if (ent->is_jar() && !ent->is_signed() && ent->manifest() != nullptr) {
Array<u1>* buf = MetadataFactory::new_array<u1>(loader_data,
ent->manifest_size(),
CHECK);
@ -561,12 +561,12 @@ void FileMapInfo::clone_shared_path_table(TRAPS) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
ClassPathEntry* jrt = ClassLoader::get_jrt_entry();
assert(jrt != NULL,
assert(jrt != nullptr,
"No modular java runtime image present when allocating the CDS classpath entry table");
if (_saved_shared_path_table_array != NULL) {
if (_saved_shared_path_table_array != nullptr) {
MetadataFactory::free_array<u8>(loader_data, _saved_shared_path_table_array);
_saved_shared_path_table_array = NULL;
_saved_shared_path_table_array = nullptr;
}
copy_shared_path_table(loader_data, CHECK);
@ -578,7 +578,7 @@ void FileMapInfo::allocate_shared_path_table(TRAPS) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
ClassPathEntry* jrt = ClassLoader::get_jrt_entry();
assert(jrt != NULL,
assert(jrt != nullptr,
"No modular java runtime image present when allocating the CDS classpath entry table");
_shared_path_table.dumptime_init(loader_data, CHECK);
@ -599,7 +599,7 @@ void FileMapInfo::allocate_shared_path_table(TRAPS) {
}
int FileMapInfo::add_shared_classpaths(int i, const char* which, ClassPathEntry *cpe, TRAPS) {
while (cpe != NULL) {
while (cpe != nullptr) {
bool is_jrt = (cpe == ClassLoader::get_jrt_entry());
bool is_module_path = i >= ClassLoaderExt::app_module_paths_start_index();
const char* type = (is_jrt ? "jrt" : (cpe->is_jar_file() ? "jar" : "dir"));
@ -643,14 +643,14 @@ void FileMapInfo::check_nonempty_dir_in_shared_path_table() {
}
if (has_nonempty_dir) {
ClassLoader::exit_with_path_failure("Cannot have non-empty directory in paths", NULL);
ClassLoader::exit_with_path_failure("Cannot have non-empty directory in paths", nullptr);
}
}
void FileMapInfo::record_non_existent_class_path_entry(const char* path) {
Arguments::assert_is_dumping_archive();
log_info(class, path)("non-existent Class-Path entry %s", path);
if (_non_existent_class_paths == NULL) {
if (_non_existent_class_paths == nullptr) {
_non_existent_class_paths = new (mtClass) GrowableArray<const char*>(10, mtClass);
}
_non_existent_class_paths->append(os::strdup(path));
@ -658,7 +658,7 @@ void FileMapInfo::record_non_existent_class_path_entry(const char* path) {
int FileMapInfo::num_non_existent_class_paths() {
Arguments::assert_is_dumping_archive();
if (_non_existent_class_paths != NULL) {
if (_non_existent_class_paths != nullptr) {
return _non_existent_class_paths->length();
} else {
return 0;
@ -729,9 +729,9 @@ class ManifestStream: public ResourceObj {
if (*_current == '\n') {
*_current = '\0';
u1* value = (u1*)strchr((char*)attr, ':');
if (value != NULL) {
if (value != nullptr) {
assert(*(value+1) == ' ', "Unrecognized format" );
if (strstr((char*)attr, "-Digest") != NULL) {
if (strstr((char*)attr, "-Digest") != nullptr) {
isSigned = true;
break;
}
@ -752,7 +752,7 @@ void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry*
assert(cpe->is_jar_file() && ent->is_jar(), "the shared class path entry is not a JAR file");
char* manifest = ClassLoaderExt::read_manifest(THREAD, cpe, &manifest_size);
if (manifest != NULL) {
if (manifest != nullptr) {
ManifestStream* stream = new ManifestStream((u1*)manifest,
manifest_size);
if (stream->check_is_signed()) {
@ -773,7 +773,7 @@ void FileMapInfo::update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry*
char* FileMapInfo::skip_first_path_entry(const char* path) {
size_t path_sep_len = strlen(os::path_separator());
char* p = strstr((char*)path, os::path_separator());
if (p != NULL) {
if (p != nullptr) {
debug_only( {
size_t image_name_len = strlen(MODULES_IMAGE_NAME);
assert(strncmp(p - image_name_len, MODULES_IMAGE_NAME, image_name_len) == 0,
@ -790,15 +790,15 @@ char* FileMapInfo::skip_first_path_entry(const char* path) {
}
int FileMapInfo::num_paths(const char* path) {
if (path == NULL) {
if (path == nullptr) {
return 0;
}
int npaths = 1;
char* p = (char*)path;
while (p != NULL) {
while (p != nullptr) {
char* prev = p;
p = strstr((char*)p, os::path_separator());
if (p != NULL) {
if (p != nullptr) {
p++;
// don't count empty path
if ((p - prev) > 1) {
@ -828,7 +828,7 @@ GrowableArray<const char*>* FileMapInfo::create_dumptime_app_classpath_array() {
Arguments::assert_is_dumping_archive();
GrowableArray<const char*>* path_array = new GrowableArray<const char*>(10);
ClassPathEntry* cpe = ClassLoader::app_classpath_entries();
while (cpe != NULL) {
while (cpe != nullptr) {
path_array->append(cpe->name());
cpe = cpe->next();
}
@ -849,10 +849,10 @@ GrowableArray<const char*>* FileMapInfo::create_path_array(const char* paths) {
}
} else {
const char* canonical_path = ClassLoader::get_canonical_path(path, current);
if (canonical_path != NULL) {
char* error_msg = NULL;
if (canonical_path != nullptr) {
char* error_msg = nullptr;
jzfile* zip = ClassLoader::open_zip_file(canonical_path, &error_msg, current);
if (zip != NULL && error_msg == NULL) {
if (zip != nullptr && error_msg == nullptr) {
path_array->append(path);
}
}
@ -939,9 +939,9 @@ bool FileMapInfo::validate_boot_class_paths() {
bool mismatch = false;
bool relaxed_check = !header()->has_platform_or_app_classes();
if (dp_len == 0 && rp == NULL) {
if (dp_len == 0 && rp == nullptr) {
return true; // ok, both runtime and dump time boot paths have modules_images only
} else if (dp_len == 0 && rp != NULL) {
} else if (dp_len == 0 && rp != nullptr) {
if (relaxed_check) {
return true; // ok, relaxed check, runtime has extra boot append path entries
} else {
@ -952,7 +952,7 @@ bool FileMapInfo::validate_boot_class_paths() {
mismatch = true;
}
}
} else if (dp_len > 0 && rp != NULL) {
} else if (dp_len > 0 && rp != nullptr) {
int num;
ResourceMark rm;
GrowableArray<const char*>* rp_array = create_path_array(rp);
@ -984,7 +984,7 @@ bool FileMapInfo::validate_boot_class_paths() {
bool FileMapInfo::validate_app_class_paths(int shared_app_paths_len) {
const char *appcp = Arguments::get_appclasspath();
assert(appcp != NULL, "NULL app classpath");
assert(appcp != nullptr, "null app classpath");
int rp_len = num_paths(appcp);
bool mismatch = false;
if (rp_len < shared_app_paths_len) {
@ -1109,7 +1109,7 @@ bool FileMapInfo::validate_shared_path_table() {
}
log_info(class, path)("ok");
} else {
if (_dynamic_archive_info != NULL && _dynamic_archive_info->_is_static) {
if (_dynamic_archive_info != nullptr && _dynamic_archive_info->_is_static) {
assert(!UseSharedSpaces, "UseSharedSpaces should be disabled");
}
return false;
@ -1118,7 +1118,7 @@ bool FileMapInfo::validate_shared_path_table() {
if (shared_path(i)->validate(false /* not a class path entry */)) {
log_info(class, path)("ok");
} else {
if (_dynamic_archive_info != NULL && _dynamic_archive_info->_is_static) {
if (_dynamic_archive_info != nullptr && _dynamic_archive_info->_is_static) {
assert(!UseSharedSpaces, "UseSharedSpaces should be disabled");
}
return false;
@ -1144,7 +1144,7 @@ bool FileMapInfo::validate_shared_path_table() {
_validating_shared_path_table = false;
#if INCLUDE_JVMTI
if (_classpath_entries_for_jvmti != NULL) {
if (_classpath_entries_for_jvmti != nullptr) {
os::free(_classpath_entries_for_jvmti);
}
size_t sz = sizeof(ClassPathEntry*) * get_number_of_shared_paths();
@ -1209,7 +1209,7 @@ public:
}
bool initialize() {
assert(_archive_name != nullptr, "Archive name is NULL");
assert(_archive_name != nullptr, "Archive name is null");
_fd = os::open(_archive_name, O_RDONLY | O_BINARY, 0);
if (_fd < 0) {
FileMapInfo::fail_continue("Specified shared archive not found (%s)", _archive_name);
@ -1220,7 +1220,7 @@ public:
// for an already opened file, do not set _fd
bool initialize(int fd) {
assert(_archive_name != nullptr, "Archive name is NULL");
assert(_archive_name != nullptr, "Archive name is null");
assert(fd != -1, "Archive must be opened already");
// First read the generic header so we know the exact size of the actual header.
GenericCDSFileMapHeader gen_header;
@ -1358,14 +1358,14 @@ public:
// Return value:
// false:
// <archive_name> is not a valid archive. *base_archive_name is set to null.
// true && (*base_archive_name) == NULL:
// true && (*base_archive_name) == nullptr:
// <archive_name> is a valid static archive.
// true && (*base_archive_name) != NULL:
// true && (*base_archive_name) != nullptr:
// <archive_name> is a valid dynamic archive.
bool FileMapInfo::get_base_archive_name_from_header(const char* archive_name,
char** base_archive_name) {
FileHeaderHelper file_helper(archive_name, false);
*base_archive_name = NULL;
*base_archive_name = nullptr;
if (!file_helper.initialize()) {
return false;
@ -1565,7 +1565,7 @@ void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, b
_allow_exec = allow_exec;
_crc = crc;
_mapped_from_file = false;
_mapped_base = NULL;
_mapped_base = nullptr;
}
void FileMapRegion::init_bitmaps(ArchiveHeapBitmapInfo oopmap, ArchiveHeapBitmapInfo ptrmap) {
@ -1627,10 +1627,10 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
size_t mapping_offset = 0;
if (region == MetaspaceShared::bm) {
requested_base = NULL; // always NULL for bm region
requested_base = nullptr; // always null for bm region
} else if (size == 0) {
// This is an unused region (e.g., a heap region when !INCLUDE_CDS_JAVA_HEAP)
requested_base = NULL;
requested_base = nullptr;
} else if (HeapShared::is_heap_region(region)) {
assert(!DynamicDumpSharedSpaces, "must be");
requested_base = base;
@ -1660,7 +1660,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
r->init(region, mapping_offset, size, read_only, allow_exec, crc);
if (base != NULL) {
if (base != nullptr) {
write_bytes_aligned(base, size);
}
}
@ -1688,7 +1688,7 @@ char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap,
size_t size_in_bits = ptrmap->size();
size_in_bytes = ptrmap->size_in_bytes();
if (closed_bitmaps != NULL && open_bitmaps != NULL) {
if (closed_bitmaps != nullptr && open_bitmaps != nullptr) {
size_in_bytes = set_bitmaps_offset(closed_bitmaps, size_in_bytes);
size_in_bytes = set_bitmaps_offset(open_bitmaps, size_in_bytes);
}
@ -1697,7 +1697,7 @@ char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap,
ptrmap->write_to((BitMap::bm_word_t*)buffer, ptrmap->size_in_bytes());
header()->set_ptrmap_size_in_bits(size_in_bits);
if (closed_bitmaps != NULL && open_bitmaps != NULL) {
if (closed_bitmaps != nullptr && open_bitmaps != nullptr) {
size_t curr_offset = write_bitmaps(closed_bitmaps, ptrmap->size_in_bytes(), buffer);
write_bitmaps(open_bitmaps, curr_offset, buffer);
}
@ -1742,7 +1742,7 @@ size_t FileMapInfo::write_heap_regions(GrowableArray<MemRegion>* regions,
int first_region_id, int max_num_regions) {
assert(max_num_regions <= 2, "Only support maximum 2 memory regions");
int arr_len = regions == NULL ? 0 : regions->length();
int arr_len = regions == nullptr ? 0 : regions->length();
if (arr_len > max_num_regions) {
fail_stop("Unable to write archive heap memory regions: "
"number of memory regions exceeds maximum due to fragmentation. "
@ -1753,7 +1753,7 @@ size_t FileMapInfo::write_heap_regions(GrowableArray<MemRegion>* regions,
size_t total_size = 0;
for (int i = 0; i < max_num_regions; i++) {
char* start = NULL;
char* start = nullptr;
size_t size = 0;
if (i < arr_len) {
start = (char*)regions->at(i).start();
@ -1850,7 +1850,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
r->allow_exec());
close();
// These have to be errors because the shared region is now unmapped.
if (base == NULL) {
if (base == nullptr) {
log_error(cds)("Unable to remap shared readonly space (errno=%d).", errno);
vm_exit(1);
}
@ -1867,7 +1867,7 @@ static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap",
"String1", "String2", "OpenArchive1", "OpenArchive2" };
MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) {
DEBUG_ONLY(FileMapRegion* last_region = NULL);
DEBUG_ONLY(FileMapRegion* last_region = nullptr);
intx addr_delta = mapped_base_address - header()->requested_base_address();
// Make sure we don't attempt to use header()->mapped_base_address() unless
@ -1881,7 +1881,7 @@ MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char*
return result;
}
FileMapRegion* r = region_at(idx);
DEBUG_ONLY(if (last_region != NULL) {
DEBUG_ONLY(if (last_region != nullptr) {
// Ensure that the OS won't be able to allocate new memory spaces between any mapped
// regions, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
assert(r->mapped_base() == last_region->mapped_end(), "must have no gaps");
@ -1930,8 +1930,8 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
FileMapRegion* r = region_at(i);
size_t size = r->used_aligned();
char *requested_addr = mapped_base_address + r->mapping_offset();
assert(r->mapped_base() == NULL, "must be not mapped yet");
assert(requested_addr != NULL, "must be specified");
assert(r->mapped_base() == nullptr, "must be not mapped yet");
assert(requested_addr != nullptr, "must be specified");
r->set_mapped_from_file(false);
@ -1984,16 +1984,16 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
// The return value is the location of the archive relocation bitmap.
char* FileMapInfo::map_bitmap_region() {
FileMapRegion* r = region_at(MetaspaceShared::bm);
if (r->mapped_base() != NULL) {
if (r->mapped_base() != nullptr) {
return r->mapped_base();
}
bool read_only = true, allow_exec = false;
char* requested_addr = NULL; // allow OS to pick any location
char* requested_addr = nullptr; // allow OS to pick any location
char* bitmap_base = os::map_memory(_fd, _full_path, r->file_offset(),
requested_addr, r->used_aligned(), read_only, allow_exec, mtClassShared);
if (bitmap_base == NULL) {
if (bitmap_base == nullptr) {
log_info(cds)("failed to map relocation bitmap");
return NULL;
return nullptr;
}
if (VerifySharedSpaces && !region_crc_check(bitmap_base, r->used(), r->crc())) {
@ -2001,7 +2001,7 @@ char* FileMapInfo::map_bitmap_region() {
if (!os::unmap_memory(bitmap_base, r->used_aligned())) {
fatal("os::unmap_memory of relocation bitmap failed");
}
return NULL;
return nullptr;
}
r->set_mapped_base(bitmap_base);
@ -2019,7 +2019,7 @@ bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
log_debug(cds, reloc)("runtime archive relocation start");
char* bitmap_base = map_bitmap_region();
if (bitmap_base == NULL) {
if (bitmap_base == nullptr) {
return false; // OOM, or CRC check failure
} else {
size_t ptrmap_size_in_bits = header()->ptrmap_size_in_bits();
@ -2080,8 +2080,8 @@ size_t FileMapInfo::readonly_total() {
return total;
}
static MemRegion *closed_heap_regions = NULL;
static MemRegion *open_heap_regions = NULL;
static MemRegion *closed_heap_regions = nullptr;
static MemRegion *open_heap_regions = nullptr;
static int num_closed_heap_regions = 0;
static int num_open_heap_regions = 0;
@ -2096,7 +2096,7 @@ bool FileMapInfo::has_heap_regions() {
// if/how these regions should be relocated at run time.
MemRegion FileMapInfo::get_heap_regions_requested_range() {
address start = (address) max_uintx;
address end = NULL;
address end = nullptr;
for (int i = MetaspaceShared::first_closed_heap_region;
i <= MetaspaceShared::last_valid_region;
@ -2116,7 +2116,7 @@ MemRegion FileMapInfo::get_heap_regions_requested_range() {
}
}
}
assert(end != NULL, "must have at least one used heap region");
assert(end != nullptr, "must have at least one used heap region");
start = align_down(start, HeapRegion::GrainBytes);
end = align_up(end, HeapRegion::GrainBytes);
@ -2319,7 +2319,7 @@ void FileMapInfo::map_heap_regions_impl() {
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();
if (bitmap_base == NULL) {
if (bitmap_base == nullptr) {
log_info(cds)("CDS heap cannot be used because bitmap region cannot be mapped");
_heap_pointers_need_patching = false;
return;
@ -2347,12 +2347,12 @@ bool FileMapInfo::map_heap_regions() {
map_heap_regions_impl();
if (!ArchiveHeapLoader::closed_regions_mapped()) {
assert(closed_heap_regions == NULL &&
assert(closed_heap_regions == nullptr &&
num_closed_heap_regions == 0, "sanity");
}
if (!ArchiveHeapLoader::open_regions_mapped()) {
assert(open_heap_regions == NULL && num_open_heap_regions == 0, "sanity");
assert(open_heap_regions == nullptr && num_open_heap_regions == 0, "sanity");
return false;
} else {
return true;
@ -2413,7 +2413,7 @@ bool FileMapInfo::map_heap_regions(int first, int max, bool is_open_archive,
char* base = os::map_memory(_fd, _full_path, r->file_offset(),
addr, regions[i].byte_size(), r->read_only(),
r->allow_exec());
if (base == NULL || base != addr) {
if (base == nullptr || base != addr) {
// dealloc the regions from java heap
dealloc_heap_regions(regions, num_regions);
log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. "
@ -2463,7 +2463,7 @@ narrowOop FileMapInfo::encoded_heap_region_dumptime_address(FileMapRegion* r) {
void FileMapInfo::patch_heap_embedded_pointers(MemRegion* regions, int num_regions,
int first_region_idx) {
char* bitmap_base = map_bitmap_region();
assert(bitmap_base != NULL, "must have already been mapped");
assert(bitmap_base != nullptr, "must have already been mapped");
for (int i=0; i<num_regions; i++) {
int region_idx = i + first_region_idx;
FileMapRegion* r = region_at(region_idx);
@ -2480,9 +2480,9 @@ void FileMapInfo::patch_heap_embedded_pointers(MemRegion* regions, int num_regio
void FileMapInfo::fixup_mapped_heap_regions() {
assert(vmClasses::Object_klass_loaded(), "must be");
// If any closed regions were found, call the fill routine to make them parseable.
// Note that closed_heap_regions may be non-NULL even if no regions were found.
// Note that closed_heap_regions may be non-null even if no regions were found.
if (num_closed_heap_regions != 0) {
assert(closed_heap_regions != NULL,
assert(closed_heap_regions != nullptr,
"Null closed_heap_regions array with non-zero count");
G1CollectedHeap::heap()->fill_archive_regions(closed_heap_regions,
num_closed_heap_regions);
@ -2495,7 +2495,7 @@ void FileMapInfo::fixup_mapped_heap_regions() {
// do the same for mapped open archive heap regions
if (num_open_heap_regions != 0) {
assert(open_heap_regions != NULL, "NULL open_heap_regions array with non-zero count");
assert(open_heap_regions != nullptr, "Null open_heap_regions array with non-zero count");
G1CollectedHeap::heap()->fill_archive_regions(open_heap_regions,
num_open_heap_regions);
@ -2511,7 +2511,7 @@ void FileMapInfo::fixup_mapped_heap_regions() {
// dealloc the archive regions from java heap
void FileMapInfo::dealloc_heap_regions(MemRegion* regions, int num) {
if (num > 0) {
assert(regions != NULL, "Null archive regions array with non-zero count");
assert(regions != nullptr, "Null archive regions array with non-zero count");
G1CollectedHeap::heap()->dealloc_archive_regions(regions, num);
}
}
@ -2552,7 +2552,7 @@ void FileMapInfo::unmap_region(int i) {
char* mapped_base = r->mapped_base();
size_t size = r->used_aligned();
if (mapped_base != NULL) {
if (mapped_base != nullptr) {
if (size > 0 && r->mapped_from_file()) {
log_info(cds)("Unmapping region #%d at base " INTPTR_FORMAT " (%s)", i, p2i(mapped_base),
shared_region_name[i]);
@ -2560,7 +2560,7 @@ void FileMapInfo::unmap_region(int i) {
fatal("os::unmap_memory failed");
}
}
r->set_mapped_base(NULL);
r->set_mapped_base(nullptr);
}
}
@ -2578,15 +2578,15 @@ void FileMapInfo::metaspace_pointers_do(MetaspaceClosure* it, bool use_copy) {
}
}
FileMapInfo* FileMapInfo::_current_info = NULL;
FileMapInfo* FileMapInfo::_dynamic_archive_info = NULL;
FileMapInfo* FileMapInfo::_current_info = nullptr;
FileMapInfo* FileMapInfo::_dynamic_archive_info = nullptr;
bool FileMapInfo::_heap_pointers_need_patching = false;
SharedPathTable FileMapInfo::_shared_path_table;
SharedPathTable FileMapInfo::_saved_shared_path_table;
Array<u8>* FileMapInfo::_saved_shared_path_table_array = NULL;
Array<u8>* FileMapInfo::_saved_shared_path_table_array = nullptr;
bool FileMapInfo::_validating_shared_path_table = false;
bool FileMapInfo::_memory_mapping_failed = false;
GrowableArray<const char*>* FileMapInfo::_non_existent_class_paths = NULL;
GrowableArray<const char*>* FileMapInfo::_non_existent_class_paths = nullptr;
// Open the shared archive file, read and validate the header
// information (version, boot classpath, etc.). If initialization
@ -2679,7 +2679,7 @@ bool FileMapHeader::validate() {
// This must be done after header validation because it might change the
// header data
const char* prop = Arguments::get_property("java.system.class.loader");
if (prop != NULL) {
if (prop != nullptr) {
warning("Archived non-system classes are disabled because the "
"java.system.class.loader property is specified (value = \"%s\"). "
"To use archived non-system classes, this property must not be set", prop);
@ -2757,7 +2757,7 @@ bool FileMapInfo::validate_header() {
// Unmap mapped regions of shared space.
void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
MetaspaceShared::set_shared_metaspace_range(NULL, NULL, NULL);
MetaspaceShared::set_shared_metaspace_range(nullptr, nullptr, nullptr);
FileMapInfo *map_info = FileMapInfo::current_info();
if (map_info) {
@ -2779,7 +2779,7 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
}
#if INCLUDE_JVMTI
ClassPathEntry** FileMapInfo::_classpath_entries_for_jvmti = NULL;
ClassPathEntry** FileMapInfo::_classpath_entries_for_jvmti = nullptr;
ClassPathEntry* FileMapInfo::get_classpath_entry_for_jvmti(int i, TRAPS) {
if (i == 0) {
@ -2788,7 +2788,7 @@ ClassPathEntry* FileMapInfo::get_classpath_entry_for_jvmti(int i, TRAPS) {
return ClassLoader::get_jrt_entry();
}
ClassPathEntry* ent = _classpath_entries_for_jvmti[i];
if (ent == NULL) {
if (ent == nullptr) {
SharedClassPathEntry* scpe = shared_path(i);
assert(scpe->is_jar(), "must be"); // other types of scpe will not produce archived classes
@ -2797,18 +2797,18 @@ ClassPathEntry* FileMapInfo::get_classpath_entry_for_jvmti(int i, TRAPS) {
if (os::stat(path, &st) != 0) {
char *msg = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(path) + 128);
jio_snprintf(msg, strlen(path) + 127, "error in finding JAR file %s", path);
THROW_MSG_(vmSymbols::java_io_IOException(), msg, NULL);
THROW_MSG_(vmSymbols::java_io_IOException(), msg, nullptr);
} else {
ent = ClassLoader::create_class_path_entry(THREAD, path, &st, false, false);
if (ent == NULL) {
if (ent == nullptr) {
char *msg = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(path) + 128);
jio_snprintf(msg, strlen(path) + 127, "error in opening JAR file %s", path);
THROW_MSG_(vmSymbols::java_io_IOException(), msg, NULL);
THROW_MSG_(vmSymbols::java_io_IOException(), msg, nullptr);
}
}
MutexLocker mu(THREAD, CDSClassFileStream_lock);
if (_classpath_entries_for_jvmti[i] == NULL) {
if (_classpath_entries_for_jvmti[i] == nullptr) {
_classpath_entries_for_jvmti[i] = ent;
} else {
// Another thread has beat me to creating this entry
@ -2826,7 +2826,7 @@ ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, Handle cl
assert(path_index < (int)get_number_of_shared_paths(), "sanity");
ClassPathEntry* cpe = get_classpath_entry_for_jvmti(path_index, CHECK_NULL);
assert(cpe != NULL, "must be");
assert(cpe != nullptr, "must be");
Symbol* name = ik->name();
const char* const class_name = name->as_C_string();
@ -2834,7 +2834,7 @@ ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, Handle cl
name->utf8_length());
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
ClassFileStream* cfs = cpe->open_stream_for_loader(THREAD, file_name, loader_data);
assert(cfs != NULL, "must be able to read the classfile data of shared classes for built-in loaders.");
assert(cfs != nullptr, "must be able to read the classfile data of shared classes for built-in loaders.");
log_debug(cds, jvmti)("classfile data for %s [%d: %s] = %d bytes", class_name, path_index,
cfs->source(), cfs->length());
return cfs;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,10 +88,10 @@ public:
time_t timestamp() const { return _timestamp; }
const char* name() const;
const char* manifest() const {
return (_manifest == NULL) ? NULL : (const char*)_manifest->data();
return (_manifest == nullptr) ? nullptr : (const char*)_manifest->data();
}
int manifest_size() const {
return (_manifest == NULL) ? 0 : _manifest->length();
return (_manifest == nullptr) ? 0 : _manifest->length();
}
void set_manifest(Array<u1>* manifest) {
_manifest = manifest;
@ -115,7 +115,7 @@ class SharedPathTable {
Array<u8>* _table;
int _size;
public:
SharedPathTable() : _table(NULL), _size(0) {}
SharedPathTable() : _table(nullptr), _size(0) {}
SharedPathTable(Array<u8>* table, int size) : _table(table), _size(size) {}
void dumptime_init(ClassLoaderData* loader_data, TRAPS);
@ -126,7 +126,7 @@ public:
}
SharedClassPathEntry* path_at(int index) {
if (index < 0) {
return NULL;
return nullptr;
}
assert(index < _size, "sanity");
char* p = (char*)_table->data();
@ -429,7 +429,7 @@ public:
static FileMapInfo* current_info() {
CDS_ONLY(return _current_info;)
NOT_CDS(return NULL;)
NOT_CDS(return nullptr;)
}
static void set_current_info(FileMapInfo* info) {
@ -438,7 +438,7 @@ public:
static FileMapInfo* dynamic_info() {
CDS_ONLY(return _dynamic_archive_info;)
NOT_CDS(return NULL;)
NOT_CDS(return nullptr;)
}
static void assert_mark(bool check);
@ -561,11 +561,11 @@ public:
private:
void seek_to_position(size_t pos);
char* skip_first_path_entry(const char* path) NOT_CDS_RETURN_(NULL);
char* skip_first_path_entry(const char* path) NOT_CDS_RETURN_(nullptr);
int num_paths(const char* path) NOT_CDS_RETURN_(0);
bool check_paths_existence(const char* paths) NOT_CDS_RETURN_(false);
GrowableArray<const char*>* create_dumptime_app_classpath_array() NOT_CDS_RETURN_(NULL);
GrowableArray<const char*>* create_path_array(const char* path) NOT_CDS_RETURN_(NULL);
GrowableArray<const char*>* create_dumptime_app_classpath_array() NOT_CDS_RETURN_(nullptr);
GrowableArray<const char*>* create_path_array(const char* path) NOT_CDS_RETURN_(nullptr);
bool classpath_failure(const char* msg, const char* name) NOT_CDS_RETURN_(false);
unsigned int longest_common_app_classpath_prefix_len(int num_paths,
GrowableArray<const char*>* rp_array)
@ -590,9 +590,9 @@ public:
static size_t write_bitmaps(GrowableArray<ArchiveHeapBitmapInfo> *bitmaps, size_t curr_offset, char* buffer);
public:
address heap_region_dumptime_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
address heap_region_requested_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
address heap_region_mapped_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
address heap_region_dumptime_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_requested_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_mapped_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
narrowOop encoded_heap_region_dumptime_address(FileMapRegion* r);
private:

View file

@ -73,16 +73,16 @@ struct ArchivableStaticFieldInfo {
BasicType type;
ArchivableStaticFieldInfo(const char* k, const char* f)
: klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}
: klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
bool valid() {
return klass_name != NULL;
return klass_name != nullptr;
}
};
bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = nullptr;
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
@ -91,10 +91,10 @@ size_t HeapShared::_total_obj_size;
#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char* _test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static const Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
@ -113,7 +113,7 @@ static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
{"java/lang/Character$CharacterCache", "archivedCache"},
{"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
{"sun/util/locale/BaseLocale", "constantBaseLocales"},
{NULL, NULL},
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
@ -123,9 +123,9 @@ static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
{"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
{"jdk/internal/math/FDBigInteger", "archivedCaches"},
#ifndef PRODUCT
{NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
{nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
{NULL, NULL},
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region (full module graph).
@ -133,14 +133,14 @@ static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
{"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
{"java/lang/Module$ArchivedData", "archivedData"},
{NULL, NULL},
{nullptr, nullptr},
};
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
OopHandle HeapShared::_roots;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = NULL;
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
@ -177,9 +177,9 @@ static void reset_states(oop obj, TRAPS) {
TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
Symbol* method_sig = vmSymbols::void_method_signature();
while (klass != NULL) {
while (klass != nullptr) {
Method* method = klass->find_method(method_name, method_sig);
if (method != NULL) {
if (method != nullptr) {
assert(method->is_private(), "must be");
if (log_is_enabled(Debug, cds)) {
ResourceMark rm(THREAD);
@ -218,16 +218,16 @@ void HeapShared::reset_archived_object_states(TRAPS) {
reset_states(boot_loader(), CHECK);
}
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = nullptr;
oop HeapShared::find_archived_heap_object(oop obj) {
assert(DumpSharedSpaces, "dump-time only");
ArchivedObjectCache* cache = archived_object_cache();
CachedOopInfo* p = cache->get(obj);
if (p != NULL) {
if (p != nullptr) {
return p->_obj;
} else {
return NULL;
return nullptr;
}
}
@ -237,7 +237,7 @@ int HeapShared::append_root(oop obj) {
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (_pending_roots == NULL) {
if (_pending_roots == nullptr) {
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
@ -248,14 +248,14 @@ objArrayOop HeapShared::roots() {
if (DumpSharedSpaces) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (!HeapShared::can_write()) {
return NULL;
return nullptr;
}
} else {
assert(UseSharedSpaces, "must be");
}
objArrayOop roots = (objArrayOop)_roots.resolve();
assert(roots != NULL, "should have been initialized");
assert(roots != nullptr, "should have been initialized");
return roots;
}
@ -264,7 +264,7 @@ oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
if (DumpSharedSpaces) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_pending_roots != NULL, "sanity");
assert(_pending_roots != nullptr, "sanity");
return _pending_roots->at(index);
} else {
assert(UseSharedSpaces, "must be");
@ -285,7 +285,7 @@ void HeapShared::clear_root(int index) {
oop old = roots()->obj_at(index);
log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
}
roots()->obj_at_put(index, NULL);
roots()->obj_at_put(index, nullptr);
}
}
@ -295,7 +295,7 @@ oop HeapShared::archive_object(oop obj) {
assert(!obj->is_stackChunk(), "do not archive stack chunks");
oop ao = find_archived_heap_object(obj);
if (ao != NULL) {
if (ao != nullptr) {
// already archived
return ao;
}
@ -304,11 +304,11 @@ oop HeapShared::archive_object(oop obj) {
if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
p2i(obj), (size_t)obj->size());
return NULL;
return nullptr;
}
oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
if (archived_oop != NULL) {
if (archived_oop != nullptr) {
count_allocation(len);
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
// Reinitialize markword to remove age/marking/locking/etc.
@ -327,7 +327,7 @@ oop HeapShared::archive_object(oop obj) {
ArchivedObjectCache* cache = archived_object_cache();
CachedOopInfo info = make_cached_oop_info(archived_oop);
cache->put(obj, info);
if (_original_object_table != NULL) {
if (_original_object_table != nullptr) {
_original_object_table->put(archived_oop, obj);
}
mark_native_pointers(obj, archived_oop);
@ -355,10 +355,10 @@ public:
oop get_oop(Klass* k) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle* handle = get(k);
if (handle != NULL) {
if (handle != nullptr) {
return handle->resolve();
} else {
return NULL;
return nullptr;
}
}
void set_oop(Klass* k, oop o) {
@ -370,7 +370,7 @@ public:
void remove_oop(Klass* k) {
MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
OopHandle* handle = get(k);
if (handle != NULL) {
if (handle != nullptr) {
handle->release(Universe::vm_global());
remove(k);
}
@ -413,9 +413,9 @@ void HeapShared::archive_java_mirrors() {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = _scratch_basic_type_mirrors[i].resolve();
assert(m != NULL, "sanity");
assert(m != nullptr, "sanity");
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
assert(archived_m != NULL, "sanity");
assert(archived_m != nullptr, "sanity");
log_trace(cds, heap, mirror)(
"Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
@ -426,14 +426,14 @@ void HeapShared::archive_java_mirrors() {
}
GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
assert(klasses != NULL, "sanity");
assert(klasses != nullptr, "sanity");
for (int i = 0; i < klasses->length(); i++) {
Klass* orig_k = klasses->at(i);
oop m = scratch_java_mirror(orig_k);
if (m != NULL) {
if (m != nullptr) {
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
guarantee(archived_m != NULL, "scratch mirrors should not point to any unachivable objects");
guarantee(archived_m != nullptr, "scratch mirrors should not point to any unachivable objects");
buffered_k->set_archived_java_mirror(append_root(archived_m));
ResourceMark rm;
log_trace(cds, heap, mirror)(
@ -460,7 +460,7 @@ void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
Metadata* ptr = archived_obj->metadata_field_acquire(offset);
if (ptr != NULL) {
if (ptr != nullptr) {
// Set the native pointer to the requested address (at runtime, if the metadata
// is mapped at the default location, it will be at this address).
address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
@ -520,7 +520,7 @@ void HeapShared::check_enum_obj(int level,
ik->external_name(), fd.name()->as_C_string());
}
oop oop_field = mirror->obj_field(fd.offset());
if (oop_field == NULL) {
if (oop_field == nullptr) {
guarantee(false, "static field %s::%s must not be null",
ik->external_name(), fd.name()->as_C_string());
} else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
@ -545,7 +545,7 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
}
RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
assert(info != NULL, "sanity");
assert(info != nullptr, "sanity");
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm;
@ -662,7 +662,7 @@ void HeapShared::copy_roots() {
// However, HeapShared::archive_objects() happens inside a safepoint, so we can't
// allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
// Instead, we have to roll our own alloc/copy routine here.
int length = _pending_roots != NULL ? _pending_roots->length() : 0;
int length = _pending_roots != nullptr ? _pending_roots->length() : 0;
size_t size = objArrayOopDesc::object_size(length);
Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
@ -689,7 +689,7 @@ void HeapShared::copy_roots() {
//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;
// Get the subgraph_info for Klass k. A new subgraph_info is created if
@ -709,7 +709,7 @@ KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
assert(DumpSharedSpaces, "dump time only");
KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
assert(info != NULL, "must have been initialized");
assert(info != nullptr, "must have been initialized");
return info;
}
@ -717,7 +717,7 @@ KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
void KlassSubGraphInfo::add_subgraph_entry_field(
int static_field_offset, oop v, bool is_closed_archive) {
assert(DumpSharedSpaces, "dump time only");
if (_subgraph_entry_fields == NULL) {
if (_subgraph_entry_fields == nullptr) {
_subgraph_entry_fields =
new (mtClass) GrowableArray<int>(10, mtClass);
}
@ -731,7 +731,7 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
assert(DumpSharedSpaces, "dump time only");
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
if (_subgraph_object_klasses == NULL) {
if (_subgraph_object_klasses == nullptr) {
_subgraph_object_klasses =
new (mtClass) GrowableArray<Klass*>(50, mtClass);
}
@ -787,12 +787,12 @@ void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
if (ik->module()->name() == vmSymbols::java_base()) {
assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
return;
}
#ifndef PRODUCT
if (!ik->module()->is_named() && ik->package() == NULL) {
if (!ik->module()->is_named() && ik->package() == nullptr) {
// This class is loaded by ArchiveHeapTestClass
return;
}
@ -827,8 +827,8 @@ bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
_k = info->klass();
_entry_field_records = NULL;
_subgraph_object_klasses = NULL;
_entry_field_records = nullptr;
_subgraph_object_klasses = nullptr;
_is_full_module_graph = info->is_full_module_graph();
if (_is_full_module_graph) {
@ -849,7 +849,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
// populate the entry fields
GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
if (entry_fields != NULL) {
if (entry_fields != nullptr) {
int num_entry_fields = entry_fields->length();
assert(num_entry_fields % 2 == 0, "sanity");
_entry_field_records =
@ -861,7 +861,7 @@ void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
// the Klasses of the objects in the sub-graphs
GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
if (subgraph_object_klasses != NULL) {
if (subgraph_object_klasses != nullptr) {
int num_subgraphs_klasses = subgraph_object_klasses->length();
_subgraph_object_klasses =
ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
@ -888,7 +888,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
ArchivedKlassSubGraphInfoRecord* record =
(ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
record->init(&info);
@ -922,7 +922,7 @@ void HeapShared::write_subgraph_info_table() {
writer.dump(&_run_time_subgraph_info_table, "subgraphs");
#ifndef PRODUCT
if (ArchiveHeapTestClass != NULL) {
if (ArchiveHeapTestClass != nullptr) {
size_t len = strlen(ArchiveHeapTestClass) + 1;
Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
@ -935,13 +935,13 @@ void HeapShared::write_subgraph_info_table() {
}
void HeapShared::serialize_root(SerializeClosure* soc) {
oop roots_oop = NULL;
oop roots_oop = nullptr;
if (soc->reading()) {
soc->do_oop(&roots_oop); // read from archive
assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
// Create an OopHandle only if we have actually mapped or loaded the roots
if (roots_oop != NULL) {
if (roots_oop != nullptr) {
assert(ArchiveHeapLoader::is_fully_available(), "must be");
_roots = OopHandle(Universe::vm_global(), roots_oop);
}
@ -956,7 +956,7 @@ void HeapShared::serialize_tables(SerializeClosure* soc) {
#ifndef PRODUCT
soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
_test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
setup_test_class(_test_class_name);
}
@ -1013,7 +1013,7 @@ void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableSt
ArchivableStaticFieldInfo* info = &fields[i];
TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
assert(k != NULL && k->is_shared_boot_class(), "sanity");
assert(k != nullptr && k->is_shared_boot_class(), "sanity");
resolve_classes_for_subgraph_of(current, k);
}
}
@ -1026,7 +1026,7 @@ void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k)
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
if (record == NULL) {
if (record == nullptr) {
clear_archived_roots_of(k);
}
}
@ -1049,7 +1049,7 @@ void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k
return;
}
if (record != NULL) {
if (record != nullptr) {
init_archived_fields_for(k, record);
}
}
@ -1059,13 +1059,13 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
if (!k->is_shared()) {
return NULL;
return nullptr;
}
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
#ifndef PRODUCT
if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
_test_class = k;
_test_class_record = record;
}
@ -1073,14 +1073,14 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
// Initialize from archived data. Currently this is done only
// during VM initialization time. No lock is needed.
if (record != NULL) {
if (record != nullptr) {
if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm(THREAD);
log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
k->external_name());
}
return NULL;
return nullptr;
}
if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
@ -1089,7 +1089,7 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
k->external_name());
}
return NULL;
return nullptr;
}
if (log_is_enabled(Info, cds, heap)) {
@ -1100,13 +1100,13 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
resolve_or_init(k, do_init, CHECK_NULL);
// Load/link/initialize the klasses of the objects in the subgraph.
// NULL class loader is used.
// nullptr class loader is used.
Array<Klass*>* klasses = record->subgraph_object_klasses();
if (klasses != NULL) {
if (klasses != nullptr) {
for (int i = 0; i < klasses->length(); i++) {
Klass* klass = klasses->at(i);
if (!klass->is_shared()) {
return NULL;
return nullptr;
}
resolve_or_init(klass, do_init, CHECK_NULL);
}
@ -1118,12 +1118,12 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
if (!do_init) {
if (k->class_loader_data() == NULL) {
if (k->class_loader_data() == nullptr) {
Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
}
} else {
assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
ik->initialize(CHECK);
@ -1141,7 +1141,7 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
// the corresponding fields within the mirror.
oop m = k->java_mirror();
Array<int>* entry_field_records = record->entry_field_records();
if (entry_field_records != NULL) {
if (entry_field_records != nullptr) {
int efr_len = entry_field_records->length();
assert(efr_len % 2 == 0, "sanity");
for (int i = 0; i < efr_len; i += 2) {
@ -1167,9 +1167,9 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
void HeapShared::clear_archived_roots_of(Klass* k) {
unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
if (record != NULL) {
if (record != nullptr) {
Array<int>* entry_field_records = record->entry_field_records();
if (entry_field_records != NULL) {
if (entry_field_records != nullptr) {
int efr_len = entry_field_records->length();
assert(efr_len % 2 == 0, "sanity");
for (int i = 0; i < efr_len; i += 2) {
@ -1235,7 +1235,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
oop archived = HeapShared::archive_reachable_objects_from(
_level + 1, _subgraph_info, obj, _is_closed_archive);
assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
assert(archived != nullptr, "VM should have exited with unarchivable objects for _level > 1");
assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
if (!_record_klasses_only) {
@ -1253,14 +1253,14 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
};
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
CachedOopInfo info;
WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
info._subgraph_info = (walker == nullptr) ? nullptr : walker->subgraph_info();
info._referrer = (walker == nullptr) ? nullptr : walker->orig_referencing_obj();
info._obj = orig_obj;
return info;
@ -1290,7 +1290,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive) {
assert(orig_obj != NULL, "must be");
assert(orig_obj != nullptr, "must be");
assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
@ -1313,7 +1313,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
}
oop archived_obj = find_archived_heap_object(orig_obj);
if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
if (java_lang_String::is_instance(orig_obj) && archived_obj != nullptr) {
// To save time, don't walk strings that are already archived. They just contain
// pointers to a type array, whose klass doesn't need to be recorded.
return archived_obj;
@ -1326,11 +1326,11 @@ oop HeapShared::archive_reachable_objects_from(int level,
set_has_been_seen_during_subgraph_recording(orig_obj);
}
bool record_klasses_only = (archived_obj != NULL);
if (archived_obj == NULL) {
bool record_klasses_only = (archived_obj != nullptr);
if (archived_obj == nullptr) {
++_num_new_archived_objs;
archived_obj = archive_object(orig_obj);
if (archived_obj == NULL) {
if (archived_obj == nullptr) {
// Skip archiving the sub-graph referenced from the current entry field.
ResourceMark rm;
log_error(cds, heap)(
@ -1340,7 +1340,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
if (level == 1) {
// Don't archive a subgraph root that's too big. For archives static fields, that's OK
// as the Java code will take care of initializing this field dynamically.
return NULL;
return nullptr;
} else {
// We don't know how to handle an object that has been archived, but some of its reachable
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
@ -1353,17 +1353,17 @@ oop HeapShared::archive_reachable_objects_from(int level,
if (Modules::check_module_oop(orig_obj)) {
Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
}
java_lang_Module::set_module_entry(archived_obj, NULL);
java_lang_Module::set_module_entry(archived_obj, nullptr);
} else if (java_lang_ClassLoader::is_instance(orig_obj)) {
// class_data will be restored explicitly at run time.
guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
orig_obj == SystemDictionary::java_system_loader() ||
java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
java_lang_ClassLoader::loader_data(orig_obj) == nullptr, "must be");
java_lang_ClassLoader::release_set_loader_data(archived_obj, nullptr);
}
}
assert(archived_obj != NULL, "must be");
assert(archived_obj != nullptr, "must be");
Klass *orig_k = orig_obj->klass();
subgraph_info->add_subgraph_object_klass(orig_k);
@ -1436,7 +1436,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
if (af == NULL) {
if (af == nullptr) {
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
klass_name, field_name);
} else {
@ -1449,7 +1449,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
} else {
// The field contains null, we still need to record the entry point,
// so it can be restored at runtime.
subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
subgraph_info->add_subgraph_entry_field(field_offset, nullptr, false);
}
}
@ -1486,7 +1486,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
void HeapShared::verify_subgraph_from(oop orig_obj) {
oop archived_obj = find_archived_heap_object(orig_obj);
if (archived_obj == NULL) {
if (archived_obj == nullptr) {
// It's OK for the root of a subgraph to be not archived. See comments in
// archive_reachable_objects_from().
return;
@ -1513,10 +1513,10 @@ void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
if (is_archived) {
assert(is_archived_object_during_dumptime(obj), "must be");
assert(find_archived_heap_object(obj) == NULL, "must be");
assert(find_archived_heap_object(obj) == nullptr, "must be");
} else {
assert(!is_archived_object_during_dumptime(obj), "must be");
assert(find_archived_heap_object(obj) != NULL, "must be");
assert(find_archived_heap_object(obj) != nullptr, "must be");
}
VerifySharedOopClosure walker(is_archived);
@ -1550,7 +1550,7 @@ void HeapShared::check_default_subgraph_classes() {
}
}
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;
@ -1562,7 +1562,7 @@ int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
return _seen_objects_table->get(obj) != NULL;
return _seen_objects_table->get(obj) != nullptr;
}
void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
@ -1627,7 +1627,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
ResourceMark rm; // for stringStream::as_string() etc.
#ifndef PRODUCT
bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
#else
bool is_test_class = false;
#endif
@ -1663,7 +1663,7 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
}
if (ik->package() != NULL) {
if (ik->package() != nullptr) {
// This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
stringStream st;
st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
@ -1712,10 +1712,10 @@ void HeapShared::init_subgraph_entry_fields(TRAPS) {
void HeapShared::setup_test_class(const char* test_class_name) {
ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
assert(p[num_slots - 2].klass_name == NULL, "must have empty slot that's patched below");
assert(p[num_slots - 1].klass_name == NULL, "must have empty slot that marks the end of the list");
assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");
if (test_class_name != NULL) {
if (test_class_name != nullptr) {
p[num_slots - 2].klass_name = test_class_name;
p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
}
@ -1725,12 +1725,12 @@ void HeapShared::setup_test_class(const char* test_class_name) {
// during runtime. This may be called before the module system is initialized so
// we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
if (_test_class != NULL) {
if (_test_class != nullptr) {
if (ik == _test_class) {
return true;
}
Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
if (klasses == NULL) {
if (klasses == nullptr) {
return false;
}
@ -1871,7 +1871,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
virtual void do_oop(oop* p) {
assert(!UseCompressedOops, "sanity");
_num_total_oops ++;
if ((*p) != NULL) {
if ((*p) != nullptr) {
size_t idx = p - (oop*)_start;
_oopmap->set_bit(idx);
if (DumpSharedSpaces) {
@ -1889,7 +1889,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
address HeapShared::to_requested_address(address dumptime_addr) {
assert(DumpSharedSpaces, "static dump time only");
if (dumptime_addr == NULL || UseCompressedOops) {
if (dumptime_addr == nullptr || UseCompressedOops) {
return dumptime_addr;
}
@ -1910,7 +1910,7 @@ address HeapShared::to_requested_address(address dumptime_addr) {
intx delta = REQUESTED_BASE - actual_base;
address requested_addr = dumptime_addr + delta;
assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
assert(REQUESTED_BASE != 0 && requested_addr != nullptr, "sanity");
return requested_addr;
}
@ -1921,7 +1921,7 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
HeapWord* p = region.start();
HeapWord* end = region.end();
FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : nullptr;
int num_objs = 0;
while (p < end) {
@ -1952,7 +1952,7 @@ ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
for (int i = 0; i < len; i++) {
Metadata** p = _native_pointers->at(i);
if (start <= p && p < end) {
assert(*p != NULL, "must be non-null");
assert(*p != nullptr, "must be non-null");
num_non_null_ptrs ++;
size_t idx = p - start;
oopmap.set_bit(idx);

View file

@ -76,16 +76,16 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
static void check_allowed_klass(InstanceKlass* ik);
public:
KlassSubGraphInfo(Klass* k, bool is_full_module_graph) :
_k(k), _subgraph_object_klasses(NULL),
_subgraph_entry_fields(NULL),
_k(k), _subgraph_object_klasses(nullptr),
_subgraph_entry_fields(nullptr),
_is_full_module_graph(is_full_module_graph),
_has_non_early_klasses(false) {}
~KlassSubGraphInfo() {
if (_subgraph_object_klasses != NULL) {
if (_subgraph_object_klasses != nullptr) {
delete _subgraph_object_klasses;
}
if (_subgraph_entry_fields != NULL) {
if (_subgraph_entry_fields != nullptr) {
delete _subgraph_entry_fields;
}
};
@ -101,7 +101,7 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
bool is_closed_archive);
void add_subgraph_object_klass(Klass *orig_k);
int num_subgraph_object_klasses() {
return _subgraph_object_klasses == NULL ? 0 :
return _subgraph_object_klasses == nullptr ? 0 :
_subgraph_object_klasses->length();
}
bool is_full_module_graph() const { return _is_full_module_graph; }
@ -125,7 +125,7 @@ class ArchivedKlassSubGraphInfoRecord {
Array<Klass*>* _subgraph_object_klasses;
public:
ArchivedKlassSubGraphInfoRecord() :
_k(NULL), _entry_field_records(NULL), _subgraph_object_klasses(NULL) {}
_k(nullptr), _entry_field_records(nullptr), _subgraph_object_klasses(nullptr) {}
void init(KlassSubGraphInfo* info);
Klass* klass() const { return _k; }
Array<int>* entry_field_records() const { return _entry_field_records; }
@ -159,8 +159,8 @@ public:
static bool is_subgraph_root_class(InstanceKlass* ik);
// Scratch objects for archiving Klass::java_mirror()
static oop scratch_java_mirror(BasicType t) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static oop scratch_java_mirror(Klass* k) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static oop scratch_java_mirror(BasicType t) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static oop scratch_java_mirror(Klass* k) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
private:
#if INCLUDE_CDS_JAVA_HEAP
@ -289,13 +289,13 @@ private:
static KlassToOopHandleTable* _scratch_java_mirror_table;
static void init_seen_objects_table() {
assert(_seen_objects_table == NULL, "must be");
assert(_seen_objects_table == nullptr, "must be");
_seen_objects_table = new (mtClass)SeenObjectsTable();
}
static void delete_seen_objects_table() {
assert(_seen_objects_table != NULL, "must be");
assert(_seen_objects_table != nullptr, "must be");
delete _seen_objects_table;
_seen_objects_table = NULL;
_seen_objects_table = nullptr;
}
// Statistics (for one round of start_recording_subgraph ... done_recording_subgraph)
@ -347,25 +347,25 @@ private:
_original_object_table =
new (mtClass)OriginalObjectTable();
} else {
_original_object_table = NULL;
_original_object_table = nullptr;
}
}
static void destroy_archived_object_cache() {
delete _archived_object_cache;
_archived_object_cache = NULL;
if (_original_object_table != NULL) {
_archived_object_cache = nullptr;
if (_original_object_table != nullptr) {
delete _original_object_table;
_original_object_table = NULL;
_original_object_table = nullptr;
}
}
static ArchivedObjectCache* archived_object_cache() {
return _archived_object_cache;
}
static oop get_original_object(oop archived_object) {
assert(_original_object_table != NULL, "sanity");
assert(_original_object_table != nullptr, "sanity");
oop* r = _original_object_table->get(archived_object);
if (r == NULL) {
return NULL;
if (r == nullptr) {
return nullptr;
} else {
return *r;
}
@ -445,7 +445,7 @@ private:
// Returns the address of a heap object when it's mapped at the
// runtime requested address. See comments in archiveBuilder.hpp.
static address to_requested_address(address dumptime_addr) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static address to_requested_address(address dumptime_addr) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static oop to_requested_address(oop dumptime_oop) {
return cast_to_oop(to_requested_address(cast_from_oop<address>(dumptime_oop)));
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ static bool should_be_archived(char* line) {
void LambdaFormInvokers::append(char* line) {
MutexLocker ml(Thread::current(), LambdaFormInvokers_lock);
if (_lambdaform_lines == NULL) {
if (_lambdaform_lines == nullptr) {
_lambdaform_lines = new GrowableArrayCHeap<char*, mtClassShared>(150);
}
_lambdaform_lines->append(line);
@ -118,7 +118,7 @@ void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
Symbol* cds_name = vmSymbols::jdk_internal_misc_CDS();
Klass* cds_klass = SystemDictionary::resolve_or_null(cds_name, THREAD);
guarantee(cds_klass != NULL, "jdk/internal/misc/CDS must exist!");
guarantee(cds_klass != nullptr, "jdk/internal/misc/CDS must exist!");
HandleMark hm(THREAD);
int len = _lambdaform_lines->length();
@ -161,8 +161,8 @@ void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
for (int i = 0; i < sz; i+= 2) {
Handle h_name(THREAD, h_array->obj_at(i));
typeArrayHandle h_bytes(THREAD, (typeArrayOop)h_array->obj_at(i+1));
assert(h_name != NULL, "Class name is NULL");
assert(h_bytes != NULL, "Class bytes is NULL");
assert(h_name != nullptr, "Class name is null");
assert(h_bytes != nullptr, "Class bytes is null");
char *class_name = java_lang_String::as_utf8_string(h_name());
if (strstr(class_name, "java/lang/invoke/BoundMethodHandle$Species_") != nullptr) {
@ -171,7 +171,7 @@ void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
// need to regenerate.
TempNewSymbol class_name_sym = SymbolTable::new_symbol(class_name);
Klass* klass = SystemDictionary::resolve_or_null(class_name_sym, THREAD);
assert(klass != NULL, "must already be loaded");
assert(klass != nullptr, "must already be loaded");
if (!klass->is_shared() && klass->shared_classpath_index() < 0) {
// Fake it, so that it will be included into the archive.
klass->set_shared_classpath_index(0);
@ -184,7 +184,7 @@ void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
// make a copy of class bytes so GC will not affect us.
char *buf = NEW_RESOURCE_ARRAY(char, len);
memcpy(buf, (char*)h_bytes->byte_at_addr(0), len);
ClassFileStream st((u1*)buf, len, NULL, ClassFileStream::verify);
ClassFileStream st((u1*)buf, len, nullptr, ClassFileStream::verify);
regenerate_class(class_name, st, CHECK);
}
}
@ -193,7 +193,7 @@ void LambdaFormInvokers::regenerate_holder_classes(TRAPS) {
void LambdaFormInvokers::regenerate_class(char* class_name, ClassFileStream& st, TRAPS) {
TempNewSymbol class_name_sym = SymbolTable::new_symbol(class_name);
Klass* klass = SystemDictionary::resolve_or_null(class_name_sym, THREAD);
assert(klass != NULL, "must exist");
assert(klass != nullptr, "must exist");
assert(klass->is_instance_klass(), "Should be");
ClassLoaderData* cld = ClassLoaderData::the_null_class_loader_data();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,8 +30,8 @@
// This constructor is used only by SystemDictionaryShared::clone_dumptime_tables().
// See comments there about the need for making a deep copy.
DumpTimeLambdaProxyClassInfo::DumpTimeLambdaProxyClassInfo(const DumpTimeLambdaProxyClassInfo& src) {
_proxy_klasses = NULL;
if (src._proxy_klasses != NULL && src._proxy_klasses->length() > 0) {
_proxy_klasses = nullptr;
if (src._proxy_klasses != nullptr && src._proxy_klasses->length() > 0) {
int n = src._proxy_klasses->length();
_proxy_klasses = new (mtClassShared) GrowableArray<InstanceKlass*>(n, mtClassShared);
for (int i = 0; i < n; i++) {
@ -41,7 +41,7 @@ DumpTimeLambdaProxyClassInfo::DumpTimeLambdaProxyClassInfo(const DumpTimeLambdaP
}
DumpTimeLambdaProxyClassInfo::~DumpTimeLambdaProxyClassInfo() {
if (_proxy_klasses != NULL) {
if (_proxy_klasses != nullptr) {
delete _proxy_klasses;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,8 +77,8 @@ public:
unsigned int hash() const;
static unsigned int dumptime_hash(Symbol* sym) {
if (sym == NULL) {
// _invoked_name maybe NULL
if (sym == nullptr) {
// _invoked_name maybe null
return 0;
}
return java_lang_String::hash_code((const jbyte*)sym->bytes(), sym->utf8_length());
@ -107,16 +107,16 @@ public:
class DumpTimeLambdaProxyClassInfo {
public:
GrowableArray<InstanceKlass*>* _proxy_klasses;
DumpTimeLambdaProxyClassInfo() : _proxy_klasses(NULL) {}
DumpTimeLambdaProxyClassInfo() : _proxy_klasses(nullptr) {}
DumpTimeLambdaProxyClassInfo(const DumpTimeLambdaProxyClassInfo& src);
DumpTimeLambdaProxyClassInfo& operator=(const DumpTimeLambdaProxyClassInfo&) = delete;
~DumpTimeLambdaProxyClassInfo();
void add_proxy_klass(InstanceKlass* proxy_klass) {
if (_proxy_klasses == NULL) {
if (_proxy_klasses == nullptr) {
_proxy_klasses = new (mtClassShared) GrowableArray<InstanceKlass*>(5, mtClassShared);
}
assert(_proxy_klasses != NULL, "sanity");
assert(_proxy_klasses != nullptr, "sanity");
_proxy_klasses->append(proxy_klass);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,7 +90,7 @@ VirtualSpace MetaspaceShared::_symbol_vs;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
void* MetaspaceShared::_shared_metaspace_static_top = nullptr;
intx MetaspaceShared::_relocation_delta;
char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_use_optimized_module_handling = true;
@ -185,7 +185,7 @@ public:
}
void do_cld(ClassLoaderData* cld) {
for (Klass* klass = cld->klasses(); klass != NULL; klass = klass->next_link()) {
for (Klass* klass = cld->klasses(); klass != nullptr; klass = klass->next_link()) {
if (klass->is_instance_klass()) {
dump(InstanceKlass::cast(klass));
}
@ -206,7 +206,7 @@ void MetaspaceShared::dump_loaded_classes(const char* file_name, TRAPS) {
}
static bool shared_base_too_high(char* specified_base, char* aligned_base, size_t cds_max) {
if (specified_base != NULL && aligned_base < specified_base) {
if (specified_base != nullptr && aligned_base < specified_base) {
// SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
// align_up(SharedBaseAddress, MetaspaceShared::core_region_alignment()) has wrapped around.
return true;
@ -223,7 +223,7 @@ static char* compute_shared_base(size_t cds_max) {
char* specified_base = (char*)SharedBaseAddress;
char* aligned_base = align_up(specified_base, MetaspaceShared::core_region_alignment());
const char* err = NULL;
const char* err = nullptr;
if (shared_base_too_high(specified_base, aligned_base, cds_max)) {
err = "too high";
} else if (!shared_base_valid(aligned_base)) {
@ -282,7 +282,7 @@ void MetaspaceShared::post_initialize(TRAPS) {
CDSProtectionDomain::allocate_shared_data_arrays(size, CHECK);
if (!DynamicDumpSharedSpaces) {
FileMapInfo* info;
if (FileMapInfo::dynamic_info() == NULL) {
if (FileMapInfo::dynamic_info() == nullptr) {
info = FileMapInfo::current_info();
} else {
info = FileMapInfo::dynamic_info();
@ -294,8 +294,8 @@ void MetaspaceShared::post_initialize(TRAPS) {
}
}
static GrowableArrayCHeap<OopHandle, mtClassShared>* _extra_interned_strings = NULL;
static GrowableArrayCHeap<Symbol*, mtClassShared>* _extra_symbols = NULL;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _extra_interned_strings = nullptr;
static GrowableArrayCHeap<Symbol*, mtClassShared>* _extra_symbols = nullptr;
void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename) {
_extra_interned_strings = new GrowableArrayCHeap<OopHandle, mtClassShared>(10000);
@ -344,7 +344,7 @@ void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename)
}
#endif
// Make sure this string is included in the dumped interned string table.
assert(str != NULL, "must succeed");
assert(str != nullptr, "must succeed");
_extra_interned_strings->append(OopHandle(Universe::vm_global(), str));
}
}
@ -459,10 +459,10 @@ public:
VM_PopulateDumpSharedSpace() :
VM_GC_Operation(0 /* total collections, ignored */, GCCause::_archive_time_gc),
_closed_heap_regions(NULL),
_open_heap_regions(NULL),
_closed_heap_bitmaps(NULL),
_open_heap_bitmaps(NULL) {}
_closed_heap_regions(nullptr),
_open_heap_regions(nullptr),
_closed_heap_bitmaps(nullptr),
_open_heap_bitmaps(nullptr) {}
bool skip_operation() const { return false; }
@ -486,7 +486,7 @@ public:
// may not be used by any of the archived classes -- these are usually
// symbols that we anticipate to be used at run time, so we can store
// them in the RO region, to be shared across multiple processes.
if (_extra_symbols != NULL) {
if (_extra_symbols != nullptr) {
for (int i = 0; i < _extra_symbols->length(); i++) {
it->push(_extra_symbols->adr_at(i));
}
@ -624,7 +624,7 @@ bool MetaspaceShared::may_be_eagerly_linked(InstanceKlass* ik) {
// that may not be expected by custom class loaders.
//
// It's OK to do this for the built-in loaders as we know they can
// tolerate this. (Note that unregistered classes are loaded by the NULL
// tolerate this. (Note that unregistered classes are loaded by the null
// loader during DumpSharedSpaces).
return false;
}
@ -662,7 +662,7 @@ void MetaspaceShared::link_shared_classes(bool jcmd_request, TRAPS) {
bool has_linked = false;
for (int i = 0; i < collect_cld.nof_cld(); i++) {
ClassLoaderData* cld = collect_cld.cld_at(i);
for (Klass* klass = cld->klasses(); klass != NULL; klass = klass->next_link()) {
for (Klass* klass = cld->klasses(); klass != nullptr; klass = klass->next_link()) {
if (klass->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(klass);
if (may_be_eagerly_linked(ik)) {
@ -740,7 +740,7 @@ void MetaspaceShared::get_default_classlist(char* default_classlist, const size_
os::jvm_path(default_classlist, (jint)(buf_size));
for (int i = 0; i < 3; i++) {
char *end = strrchr(default_classlist, *os::file_separator());
if (end != NULL) *end = '\0';
if (end != nullptr) *end = '\0';
}
size_t classlist_path_len = strlen(default_classlist);
if (classlist_path_len >= 3) {
@ -765,7 +765,7 @@ void MetaspaceShared::preload_classes(TRAPS) {
const char* classlist_path;
get_default_classlist(default_classlist, sizeof(default_classlist));
if (SharedClassListFile == NULL) {
if (SharedClassListFile == nullptr) {
classlist_path = default_classlist;
} else {
classlist_path = SharedClassListFile;
@ -836,9 +836,9 @@ bool MetaspaceShared::try_link_class(JavaThread* current, InstanceKlass* ik) {
if (ik->is_loaded() && !ik->is_linked() && ik->can_be_verified_at_dumptime() &&
!SystemDictionaryShared::has_class_failed_verification(ik)) {
bool saved = BytecodeVerificationLocal;
if (ik->is_shared_unregistered_class() && ik->class_loader() == NULL) {
if (ik->is_shared_unregistered_class() && ik->class_loader() == nullptr) {
// The verification decision is based on BytecodeVerificationRemote
// for non-system classes. Since we are using the NULL classloader
// for non-system classes. Since we are using the null classloader
// to load non-system classes for customized class loaders during dumping,
// we need to temporarily change BytecodeVerificationLocal to be the same as
// BytecodeVerificationRemote. Note this can cause the parent system
@ -883,7 +883,7 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
}
}
}
if (_extra_interned_strings != NULL) {
if (_extra_interned_strings != nullptr) {
for (i = 0; i < _extra_interned_strings->length(); i ++) {
OopHandle string = _extra_interned_strings->at(i);
HeapShared::add_to_dumped_interned_strings(string.resolve());
@ -933,7 +933,7 @@ void VM_PopulateDumpSharedSpace::dump_one_heap_bitmap(MemRegion region,
bitmap.write_to(buffer, size_in_bytes);
} else {
size_in_bytes = 0;
buffer = NULL;
buffer = nullptr;
}
log_info(cds, heap)("%s = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
@ -970,9 +970,9 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
FileMapInfo* static_mapinfo = open_static_archive();
FileMapInfo* dynamic_mapinfo = NULL;
FileMapInfo* dynamic_mapinfo = nullptr;
if (static_mapinfo != NULL) {
if (static_mapinfo != nullptr) {
log_info(cds)("Core region alignment: " SIZE_FORMAT, static_mapinfo->core_region_alignment());
dynamic_mapinfo = open_dynamic_archive();
@ -987,7 +987,7 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
}
if (result == MAP_ARCHIVE_SUCCESS) {
bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
bool dynamic_mapped = (dynamic_mapinfo != nullptr && dynamic_mapinfo->is_mapped());
char* cds_base = static_mapinfo->mapped_base();
char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
@ -1001,7 +1001,7 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
FileMapInfo::set_shared_path_table(static_mapinfo);
}
} else {
set_shared_metaspace_range(NULL, NULL, NULL);
set_shared_metaspace_range(nullptr, nullptr, nullptr);
if (DynamicDumpSharedSpaces) {
warning("-XX:ArchiveClassesAtExit is unsupported when base CDS archive is not loaded. Run with -Xlog:cds for more info.");
}
@ -1015,38 +1015,38 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
}
}
if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
if (static_mapinfo != nullptr && !static_mapinfo->is_mapped()) {
delete static_mapinfo;
}
if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
if (dynamic_mapinfo != nullptr && !dynamic_mapinfo->is_mapped()) {
delete dynamic_mapinfo;
}
}
FileMapInfo* MetaspaceShared::open_static_archive() {
const char* static_archive = Arguments::GetSharedArchivePath();
assert(static_archive != nullptr, "SharedArchivePath is NULL");
assert(static_archive != nullptr, "SharedArchivePath is nullptr");
FileMapInfo* mapinfo = new FileMapInfo(static_archive, true);
if (!mapinfo->initialize()) {
delete(mapinfo);
return NULL;
return nullptr;
}
return mapinfo;
}
FileMapInfo* MetaspaceShared::open_dynamic_archive() {
if (DynamicDumpSharedSpaces) {
return NULL;
return nullptr;
}
const char* dynamic_archive = Arguments::GetSharedDynamicArchivePath();
if (dynamic_archive == nullptr) {
return NULL;
return nullptr;
}
FileMapInfo* mapinfo = new FileMapInfo(dynamic_archive, false);
if (!mapinfo->initialize()) {
delete(mapinfo);
return NULL;
return nullptr;
}
return mapinfo;
}
@ -1056,7 +1056,7 @@ FileMapInfo* MetaspaceShared::open_dynamic_archive() {
// false = map at an alternative address picked by OS.
MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
bool use_requested_addr) {
if (use_requested_addr && static_mapinfo->requested_base_address() == NULL) {
if (use_requested_addr && static_mapinfo->requested_base_address() == nullptr) {
log_info(cds)("Archive(s) were created with -XX:SharedBaseAddress=0. Always map at os-selected address.");
return MAP_ARCHIVE_MMAP_FAILURE;
}
@ -1074,7 +1074,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
return MAP_ARCHIVE_MMAP_FAILURE;
};
if (dynamic_mapinfo != NULL) {
if (dynamic_mapinfo != nullptr) {
// Ensure that the OS won't be able to allocate new memory spaces between the two
// archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
@ -1088,7 +1088,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
total_space_rs,
archive_space_rs,
class_space_rs);
if (mapped_base_address == NULL) {
if (mapped_base_address == nullptr) {
result = MAP_ARCHIVE_MMAP_FAILURE;
log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
} else {
@ -1163,7 +1163,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
result = MAP_ARCHIVE_SUCCESS;
} else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
assert(dynamic_mapinfo != nullptr && !dynamic_mapinfo->is_mapped(), "must have failed");
// No need to retry mapping the dynamic archive again, as it will never succeed
// (bad file, etc) -- just keep the base archive.
log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
@ -1268,7 +1268,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
// archive space, close enough such that narrow class pointer encoding
// covers both spaces.
// If UseCompressedClassPointers=0, class_space_rs remains unreserved.
// - On error: NULL is returned and the spaces remain unreserved.
// - On error: null is returned and the spaces remain unreserved.
char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
FileMapInfo* dynamic_mapinfo,
bool use_archive_base_addr,
@ -1276,16 +1276,16 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
ReservedSpace& archive_space_rs,
ReservedSpace& class_space_rs) {
address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : nullptr);
const size_t archive_space_alignment = core_region_alignment();
// Size and requested location of the archive_space_rs (for both static and dynamic archives)
assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
size_t archive_end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
// If a base address is given, it must have valid alignment and be suitable as encoding base.
if (base_address != NULL) {
if (base_address != nullptr) {
assert(is_aligned(base_address, archive_space_alignment),
"Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
if (Metaspace::using_class_space()) {
@ -1300,13 +1300,13 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
assert(base_address == NULL ||
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
// Register archive space with NMT.
MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
return archive_space_rs.base();
}
return NULL;
return nullptr;
}
#ifdef _LP64
@ -1353,7 +1353,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
return NULL;
return nullptr;
}
} else {
if (use_archive_base_addr && base_address != nullptr) {
@ -1365,11 +1365,11 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
}
if (!total_space_rs.is_reserved()) {
return NULL;
return nullptr;
}
// Paranoid checks:
assert(base_address == NULL || (address)total_space_rs.base() == base_address,
assert(base_address == nullptr || (address)total_space_rs.base() == base_address,
"Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_space_rs.base()));
assert(is_aligned(total_space_rs.base(), archive_space_alignment), "Sanity");
assert(total_space_rs.size() == total_range_size, "Sanity");
@ -1396,7 +1396,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
#else
ShouldNotReachHere();
return NULL;
return nullptr;
#endif
}
@ -1424,7 +1424,7 @@ static int archive_regions_count = 2;
MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
assert(UseSharedSpaces, "must be runtime");
if (mapinfo == NULL) {
if (mapinfo == nullptr) {
return MAP_ARCHIVE_SUCCESS; // The dynamic archive has not been specified. No error has happened -- trivially succeeded.
}
@ -1454,7 +1454,7 @@ MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped
void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
assert(UseSharedSpaces, "must be runtime");
if (mapinfo != NULL) {
if (mapinfo != nullptr) {
mapinfo->unmap_regions(archive_regions, archive_regions_count);
mapinfo->unmap_region(MetaspaceShared::bm);
mapinfo->set_is_mapped(false);
@ -1503,7 +1503,7 @@ void MetaspaceShared::initialize_shared_spaces() {
static_mapinfo->unmap_region(MetaspaceShared::bm);
FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
if (dynamic_mapinfo != NULL) {
if (dynamic_mapinfo != nullptr) {
intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
ReadClosure rc(&buffer);
SymbolTable::serialize_shared_table_header(&rc, false);
@ -1541,7 +1541,7 @@ void MetaspaceShared::initialize_shared_spaces() {
tty->print_cr("Number of shared symbols: %d", cl.total());
tty->print_cr("Number of shared strings: %zu", StringTable::shared_entry_count());
tty->print_cr("VM version: %s\r\n", static_mapinfo->vm_version());
if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
if (FileMapInfo::current_info() == nullptr || _archive_loading_failed) {
tty->print_cr("archive is invalid");
vm_exit(1);
} else {
@ -1561,7 +1561,7 @@ bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
if (!mapinfo->remap_shared_readonly_as_readwrite()) {
return false;
}
if (FileMapInfo::dynamic_info() != NULL) {
if (FileMapInfo::dynamic_info() != nullptr) {
mapinfo = FileMapInfo::dynamic_info();
if (!mapinfo->remap_shared_readonly_as_readwrite()) {
return false;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@
// the "source:" in the class list file (see classListParser.cpp), and can be a directory or
// a JAR file.
InstanceKlass* UnregisteredClasses::load_class(Symbol* name, const char* path, TRAPS) {
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
{
@ -77,7 +77,7 @@ class URLClassLoaderTable : public ResourceHashtable<
137, // prime number
AnyObj::C_HEAP> {};
static URLClassLoaderTable* _url_classloader_table = NULL;
static URLClassLoaderTable* _url_classloader_table = nullptr;
Handle UnregisteredClasses::create_url_classloader(Symbol* path, TRAPS) {
ResourceMark rm(THREAD);
@ -101,11 +101,11 @@ Handle UnregisteredClasses::create_url_classloader(Symbol* path, TRAPS) {
}
Handle UnregisteredClasses::get_url_classloader(Symbol* path, TRAPS) {
if (_url_classloader_table == NULL) {
if (_url_classloader_table == nullptr) {
_url_classloader_table = new (mtClass)URLClassLoaderTable();
}
OopHandle* url_classloader_ptr = _url_classloader_table->get(path);
if (url_classloader_ptr != NULL) {
if (url_classloader_ptr != nullptr) {
return Handle(THREAD, (*url_classloader_ptr).resolve());
} else {
Handle url_classloader = create_url_classloader(path, CHECK_NH);