8178499: Remove _ptr_ and _size_ infixes from align functions

Reviewed-by: rehn, tschatzl
Stefan Karlsson 2017-07-04 15:58:10 +02:00
parent 0fcf645cff
commit dbd3b5a79e
115 changed files with 454 additions and 462 deletions
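For context, the renaming visible throughout this diff maps align_size_up/align_ptr_up to align_up, align_size_down/align_ptr_down to align_down, is_size_aligned/is_ptr_aligned to is_aligned, and the underscore-suffixed forms (align_size_up_, align_size_down_) to align_up_/align_down_. Below is a minimal sketch of the rounding arithmetic such helpers perform, assuming a power-of-two alignment; the names with the _sketch suffix are illustrative stand-ins, not HotSpot's actual templated definitions.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: round x up/down to a multiple of a power-of-two alignment.
inline uintptr_t align_up_sketch(uintptr_t x, uintptr_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0 && "power of two");
  return (x + alignment - 1) & ~(alignment - 1);
}

inline uintptr_t align_down_sketch(uintptr_t x, uintptr_t alignment) {
  assert(alignment != 0 && (alignment & (alignment - 1)) == 0 && "power of two");
  return x & ~(alignment - 1);
}

inline bool is_aligned_sketch(uintptr_t x, uintptr_t alignment) {
  return (x & (alignment - 1)) == 0;
}

int main() {
  assert(align_up_sketch(13, 8) == 16);   // rounds up to the next multiple of 8
  assert(align_down_sketch(13, 8) == 8);  // rounds down to the previous multiple
  assert(is_aligned_sketch(16, 8));       // 16 is a multiple of 8
  return 0;
}
```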

View file

@@ -340,7 +340,7 @@ void C1_MacroAssembler::allocate_array(
  // Check for negative or excessive length.
  size_t max_length = max_array_allocation_length >> log2_elt_size;
  if (UseTLAB) {
- size_t max_tlab = align_size_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
+ size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
  if (max_tlab < max_length) { max_length = max_tlab; }
  }
  load_const_optimized(t1, max_length);

View file

@@ -251,7 +251,7 @@ void Runtime1::initialize_pd() {
  fpu_reg_save_offsets[i] = sp_offset;
  sp_offset += BytesPerWord;
  }
- frame_size_in_bytes = align_size_up(sp_offset, frame::alignment_in_bytes);
+ frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
  }
@@ -275,7 +275,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
  static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
  int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
- const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+ const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);
@@ -325,7 +325,7 @@ OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, ad
  static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
  int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
- const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
+ const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

View file

@@ -626,7 +626,7 @@ class StubGenerator: public StubCodeGenerator {
  int spill_slots = 3;
  if (preserve1 != noreg) { spill_slots++; }
  if (preserve2 != noreg) { spill_slots++; }
- const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
  Label filtered;
  // Is marking active?
@@ -687,7 +687,7 @@ class StubGenerator: public StubCodeGenerator {
  case BarrierSet::G1SATBCTLogging:
  {
  int spill_slots = (preserve != noreg) ? 1 : 0;
- const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
  __ save_LR_CR(R0);
  __ push_frame(frame_size, R0);

View file

@@ -697,7 +697,7 @@ void VM_Version::determine_features() {
  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
- (*test)(align_ptr_up(mid_of_test_area, 16), (uint64_t)0);
+ (*test)(align_up(mid_of_test_area, 16), (uint64_t)0);
  VM_Version::_is_determine_features_test_running = false;
  // determine which instructions are legal.

View file

@@ -272,7 +272,7 @@ void Runtime1::initialize_pd() {
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context. It's checked by an assert when
  // it's used though.
- frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
+ frame_size_in_bytes = align_up(sp_offset * wordSize, 8);
  }

View file

@@ -90,7 +90,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
  }
  TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
  }else {
- int const_size = align_size_up(_constants->end()-_constants->start(), CodeEntryAlignment);
+ int const_size = align_up(_constants->end()-_constants->start(), CodeEntryAlignment);
  NativeMovRegMem* load = nativeMovRegMem_at(pc);
  // This offset must match with SPARCLoadConstantTableBaseOp.emitCode
  load->set_offset(- (const_size - data_offset + Assembler::min_simm13()));

View file

@@ -86,7 +86,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
  void* end = static_cast<char*>(to) + size;
  if (size >= (size_t)BytesPerWord) {
  // Fill any partial word prefix.
- uintx* aligned_to = static_cast<uintx*>(align_ptr_up(to, BytesPerWord));
+ uintx* aligned_to = static_cast<uintx*>(align_up(to, BytesPerWord));
  fill_subword(to, aligned_to, value);
  // Compute fill word.
@@ -97,7 +97,7 @@ void memset_with_concurrent_readers(void* to, int value, size_t size) {
  xvalue |= (xvalue << 16);
  xvalue |= (xvalue << 32);
- uintx* aligned_end = static_cast<uintx*>(align_ptr_down(end, BytesPerWord));
+ uintx* aligned_end = static_cast<uintx*>(align_down(end, BytesPerWord));
  assert(aligned_to <= aligned_end, "invariant");
  // for ( ; aligned_to < aligned_end; ++aligned_to) {

View file

@@ -43,7 +43,7 @@ class Bytes: AllStatic {
  T x;
- if (is_ptr_aligned(p, sizeof(T))) {
+ if (is_aligned(p, sizeof(T))) {
  x = *(T*)p;
  } else {
  memcpy(&x, p, sizeof(T));
@@ -56,7 +56,7 @@ class Bytes: AllStatic {
  static inline void put_native(void* p, T x) {
  assert(p != NULL, "null pointer");
- if (is_ptr_aligned(p, sizeof(T))) {
+ if (is_aligned(p, sizeof(T))) {
  *(T*)p = x;
  } else {
  memcpy(p, &x, sizeof(T));

View file

@@ -57,8 +57,8 @@ class InterpreterFrame : public ZeroFrame {
  protected:
  enum Layout {
  istate_off = jf_header_words +
- (align_size_up_(sizeof(BytecodeInterpreter),
+ (align_up_(sizeof(BytecodeInterpreter),
  wordSize) >> LogBytesPerWord) - 1,
  header_words
  };
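The Zero interpreter frame hunk above uses the underscore-suffixed variant (align_up_) inside an enum initializer, so that form has to be usable in constant expressions. A hedged sketch of how such a variant could be written follows; the macro name and definition here are illustrative assumptions, not HotSpot's actual header.

```cpp
// Illustrative assumption: a constant-expression-friendly align-up, written as
// a macro so it can appear in enum initializers (a constexpr function would
// also work in modern C++).
#define ALIGN_UP_SKETCH(size, alignment) \
  (((size) + ((alignment) - 1)) & ~((alignment) - 1))

enum ExampleLayout {
  // e.g. words needed for a hypothetical 20-byte interpreter state on a
  // 64-bit target: align 20 up to 24, then divide by the 8-byte word size.
  example_state_words = ALIGN_UP_SKETCH(20, 8) / 8   // == 3
};
```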

View file

@@ -148,7 +148,7 @@ IRT_ENTRY(address,
  ZeroStack *stack = thread->zero_stack();
  int required_words =
- (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
+ (align_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
  (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
  stack->overflow_check(required_words, CHECK_NULL);

View file

@@ -39,7 +39,7 @@ int ZeroStack::suggest_size(Thread *thread) const {
  assert(needs_setup(), "already set up");
  int abi_available = abi_stack_available(thread);
  assert(abi_available >= 0, "available abi stack must be >= 0");
- return align_size_down(abi_available / 2, wordSize);
+ return align_down(abi_available / 2, wordSize);
  }
  void ZeroStack::handle_overflow(TRAPS) {

View file

@@ -88,7 +88,7 @@ class ZeroStack {
  }
  void *alloc(size_t size) {
- int count = align_size_up(size, wordSize) >> LogBytesPerWord;
+ int count = align_up(size, wordSize) >> LogBytesPerWord;
  assert(count <= available_words(), "stack overflow");
  return _sp -= count;
  }

View file

@@ -53,7 +53,7 @@ bool MiscUtils::is_readable_pointer(const void* p) {
  if (!CanUseSafeFetch32()) {
  return true;
  }
- int* const aligned = (int*) align_ptr_down(p, 4);
+ int* const aligned = (int*) align_down(p, 4);
  int cafebabe = 0xcafebabe;
  int deadbeef = 0xdeadbeef;
  return (SafeFetch32(aligned, cafebabe) != cafebabe) ||

View file

@@ -1936,7 +1936,7 @@ static char* reserve_shmated_memory (
  }
  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
- const size_t size = align_size_up(bytes, 64*K);
+ const size_t size = align_up(bytes, 64*K);
  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
@@ -2077,7 +2077,7 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
  }
  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
- const size_t size = align_size_up(bytes, os::vm_page_size());
+ const size_t size = align_up(bytes, os::vm_page_size());
  // alignment: Allocate memory large enough to include an aligned range of the right size and
  // cut off the leading and trailing waste pages.
@@ -2110,7 +2110,7 @@ static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t al
  }
  // Handle alignment.
- char* const addr_aligned = align_ptr_up(addr, alignment_hint);
+ char* const addr_aligned = align_up(addr, alignment_hint);
  const size_t waste_pre = addr_aligned - addr;
  char* const addr_aligned_end = addr_aligned + size;
  const size_t waste_post = extra_size - waste_pre - size;
@@ -2336,9 +2336,9 @@ char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment
  assert0(requested_addr == NULL);
  // Always round to os::vm_page_size(), which may be larger than 4K.
- bytes = align_size_up(bytes, os::vm_page_size());
+ bytes = align_up(bytes, os::vm_page_size());
  const size_t alignment_hint0 =
- alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
+ alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
@@ -2360,8 +2360,8 @@ bool os::pd_release_memory(char* addr, size_t size) {
  guarantee0(vmi);
  // Always round to os::vm_page_size(), which may be larger than 4K.
- size = align_size_up(size, os::vm_page_size());
- addr = align_ptr_up(addr, os::vm_page_size());
+ size = align_up(size, os::vm_page_size());
+ addr = align_up(addr, os::vm_page_size());
  bool rc = false;
  bool remove_bookkeeping = false;
@@ -2527,7 +2527,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  char* addr = NULL;
  // Always round to os::vm_page_size(), which may be larger than 4K.
- bytes = align_size_up(bytes, os::vm_page_size());
+ bytes = align_up(bytes, os::vm_page_size());
  // In 4K mode always use mmap.
  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
@@ -4312,7 +4312,7 @@ size_t os::current_stack_size() {
  // We need to do this because caller code will assume stack low address is
  // page aligned and will place guard pages without checking.
  address low = bounds.base - bounds.size;
- address low_aligned = (address)align_ptr_up(low, os::vm_page_size());
+ address low_aligned = (address)align_up(low, os::vm_page_size());
  size_t s = bounds.base - low_aligned;
  return s;
  }

View file

@@ -149,7 +149,7 @@ bool AixSymbols::get_function_name (
  codeptr_t pc2 = (codeptr_t) pc;
  // Make sure the pointer is word aligned.
- pc2 = (codeptr_t) align_ptr_up((char*)pc2, 4);
+ pc2 = (codeptr_t) align_up((char*)pc2, 4);
  CHECK_POINTER_READABLE(pc2)
  // Find start of traceback table.

View file

@@ -2272,7 +2272,7 @@ bool os::pd_release_memory(char* addr, size_t size) {
  static bool bsd_mprotect(char* addr, size_t size, int prot) {
  // Bsd wants the mprotect address argument to be page aligned.
- char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
+ char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
@@ -2281,7 +2281,7 @@ static bool bsd_mprotect(char* addr, size_t size, int prot) {
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");
- size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
+ size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
  return ::mprotect(bottom, size, prot) == 0;
  }

View file

@@ -578,7 +578,7 @@ static void NOINLINE _expand_stack_to(address bottom) {
  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
- bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
+ bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;
  // sp might be slightly above current stack pointer; if that's the case, we
@@ -715,7 +715,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
  if (stack_size <= SIZE_MAX - guard_size) {
  stack_size += guard_size;
  }
- assert(is_size_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
+ assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
  int status = pthread_attr_setstacksize(&attr, stack_size);
  assert_status(status == 0, status, "pthread_attr_setstacksize");
@@ -1101,7 +1101,7 @@ void os::Linux::capture_initial_stack(size_t max_size) {
  }
  // stack_top could be partially down the page so align it
- stack_top = align_size_up(stack_top, page_size());
+ stack_top = align_up(stack_top, page_size());
  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
@@ -1111,7 +1111,7 @@ void os::Linux::capture_initial_stack(size_t max_size) {
  // clamp it at 8MB as we do on Solaris
  _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }
- _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
+ _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
@@ -3170,7 +3170,7 @@ static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
  start = NULL;
  }
  } else {
- char* const start_aligned = align_ptr_up(start, alignment);
+ char* const start_aligned = align_up(start, alignment);
  char* const end_aligned = start_aligned + bytes;
  char* const end = start + extra_size;
  if (start_aligned > start) {
@@ -3200,7 +3200,7 @@ bool os::pd_release_memory(char* addr, size_t size) {
  static bool linux_mprotect(char* addr, size_t size, int prot) {
  // Linux wants the mprotect address argument to be page aligned.
- char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
+ char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
@@ -3209,7 +3209,7 @@ static bool linux_mprotect(char* addr, size_t size, int prot) {
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");
- size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
+ size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  return ::mprotect(bottom, size, prot) == 0;
  }
@@ -3244,7 +3244,7 @@ bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
  MAP_ANONYMOUS|MAP_PRIVATE,
  -1, 0);
  if (p != MAP_FAILED) {
- void *aligned_p = align_ptr_up(p, page_size);
+ void *aligned_p = align_up(p, page_size);
  result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
@@ -3487,9 +3487,9 @@ void os::large_page_init() {
  } while (0)
  static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
- assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
- if (!is_size_aligned(alignment, SHMLBA)) {
+ assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");
+ if (!is_aligned(alignment, SHMLBA)) {
  assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
  return NULL;
  }
@@ -3525,7 +3525,7 @@ static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
  }
  static char* shmat_at_address(int shmid, char* req_addr) {
- if (!is_ptr_aligned(req_addr, SHMLBA)) {
+ if (!is_aligned(req_addr, SHMLBA)) {
  assert(false, "Requested address needs to be SHMLBA aligned");
  return NULL;
  }
@@ -3543,8 +3543,8 @@ static char* shmat_at_address(int shmid, char* req_addr) {
  static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
  // If a req_addr has been provided, we assume that the caller has already aligned the address.
  if (req_addr != NULL) {
- assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
- assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
+ assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
+ assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
  return shmat_at_address(shmid, req_addr);
  }
@@ -3553,7 +3553,7 @@ static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char*
  // However, if the alignment is larger than the large page size, we have
  // to manually ensure that the memory returned is 'alignment' aligned.
  if (alignment > os::large_page_size()) {
- assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
+ assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
  return shmat_with_alignment(shmid, bytes, alignment);
  } else {
  return shmat_at_address(shmid, NULL);
@@ -3565,10 +3565,10 @@ char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");
- assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
- assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
- if (!is_size_aligned(bytes, os::large_page_size())) {
+ assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
+ assert(is_aligned(req_addr, alignment), "Unaligned address");
+ if (!is_aligned(bytes, os::large_page_size())) {
  return NULL; // Fallback to small pages.
  }
@@ -3627,8 +3627,8 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
  char* req_addr,
  bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
- assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
- assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+ assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
+ assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  char* addr = (char*)::mmap(req_addr, bytes, prot,
@@ -3640,7 +3640,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
  return NULL;
  }
- assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
+ assert(is_aligned(addr, os::large_page_size()), "Must be");
  return addr;
  }
@@ -3659,8 +3659,8 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
  size_t large_page_size = os::large_page_size();
  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
- assert(is_ptr_aligned(req_addr, alignment), "Must be");
- assert(is_size_aligned(bytes, alignment), "Must be");
+ assert(is_aligned(req_addr, alignment), "Must be");
+ assert(is_aligned(bytes, alignment), "Must be");
  // First reserve - but not commit - the address range in small pages.
  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
@@ -3669,17 +3669,17 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes,
  return NULL;
  }
- assert(is_ptr_aligned(start, alignment), "Must be");
+ assert(is_aligned(start, alignment), "Must be");
  char* end = start + bytes;
  // Find the regions of the allocated chunk that can be promoted to large pages.
- char* lp_start = align_ptr_up(start, large_page_size);
- char* lp_end = align_ptr_down(end, large_page_size);
+ char* lp_start = align_up(start, large_page_size);
+ char* lp_end = align_down(end, large_page_size);
  size_t lp_bytes = lp_end - lp_start;
- assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+ assert(is_aligned(lp_bytes, large_page_size), "Must be");
  if (lp_bytes == 0) {
  // The mapped region doesn't even span the start and the end of a large page.
@@ -3740,12 +3740,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
  char* req_addr,
  bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
- assert(is_ptr_aligned(req_addr, alignment), "Must be");
- assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
+ assert(is_aligned(req_addr, alignment), "Must be");
+ assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  assert(is_power_of_2(os::large_page_size()), "Must be");
  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
- if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+ if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
  return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
  } else {
  return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
@@ -5967,12 +5967,12 @@ class TestReserveMemorySpecial : AllStatic {
  for (int i = 0; i < num_sizes; i++) {
  const size_t size = sizes[i];
- for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+ for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
  test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
  size, alignment, p2i(p), (p != NULL ? "" : "(failed)"));
  if (p != NULL) {
- assert(is_ptr_aligned(p, alignment), "must be");
+ assert(is_aligned(p, alignment), "must be");
  small_page_write(p, size);
  os::Linux::release_memory_special_huge_tlbfs(p, size);
  }
@@ -5985,8 +5985,8 @@ class TestReserveMemorySpecial : AllStatic {
  for (int i = 0; i < num_sizes; i++) {
  const size_t size = sizes[i];
- for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
- char* const req_addr = align_ptr_up(mapping1, alignment);
+ for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
+ char* const req_addr = align_up(mapping1, alignment);
  char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
  test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
  size, alignment, p2i(req_addr), p2i(p),
@@ -6005,8 +6005,8 @@ class TestReserveMemorySpecial : AllStatic {
  for (int i = 0; i < num_sizes; i++) {
  const size_t size = sizes[i];
- for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
- char* const req_addr = align_ptr_up(mapping2, alignment);
+ for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
+ char* const req_addr = align_up(mapping2, alignment);
  char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
  test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
  size, alignment, p2i(req_addr), p2i(p), ((p != NULL ? "" : "(failed)")));
@@ -6039,8 +6039,8 @@ class TestReserveMemorySpecial : AllStatic {
  char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
  if (addr != NULL) {
- assert(is_ptr_aligned(addr, alignment), "Check");
- assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+ assert(is_aligned(addr, alignment), "Check");
+ assert(is_aligned(addr, os::large_page_size()), "Check");
  small_page_write(addr, size);
@@ -6053,7 +6053,7 @@ class TestReserveMemorySpecial : AllStatic {
  size_t ag = os::vm_allocation_granularity();
  for (size_t size = ag; size < lp * 3; size += ag) {
- for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+ for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  test_reserve_memory_special_shm(size, alignment);
  }
  }
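The reserve_memory_special_huge_tlbfs_mixed hunk above is a typical consumer of the renamed helpers: align_up and align_down bracket the largest large-page-aligned sub-range inside a small-page mapping, leaving the unaligned head and tail on small pages. A minimal standalone sketch of that bracketing step, using hypothetical helper names and an assumed 2M large page size:

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the renamed align_up/align_down helpers,
// restricted to power-of-two alignments on integral addresses.
static uintptr_t align_up_sk(uintptr_t p, uintptr_t alignment) {
  return (p + alignment - 1) & ~(alignment - 1);
}
static uintptr_t align_down_sk(uintptr_t p, uintptr_t alignment) {
  return p & ~(alignment - 1);
}

int main() {
  const uintptr_t large_page_size = 2 * 1024 * 1024;  // assumed 2M large pages
  const uintptr_t start = 0x7f0000100000ULL;          // some small-page mapping
  const uintptr_t end   = start + 5 * 1024 * 1024;

  // [lp_start, lp_end) is the part that could be promoted to large pages,
  // mirroring lp_start/lp_end in the hunk above; the rest stays on small pages.
  const uintptr_t lp_start = align_up_sk(start, large_page_size);
  const uintptr_t lp_end   = align_down_sk(end, large_page_size);
  assert(lp_start <= lp_end);
  printf("promotable bytes: %zu\n", (size_t)(lp_end - lp_start));
  return 0;
}
```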

View file

@@ -158,7 +158,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  }
  // Do manual alignment
- char* aligned_base = align_ptr_up(extra_base, alignment);
+ char* aligned_base = align_up(extra_base, alignment);
  // [ | | ]
  // ^ extra_base
@@ -322,7 +322,7 @@ bool os::has_allocatable_memory_limit(julong* limit) {
  julong lower_limit = min_allocation_size;
  while ((upper_limit - lower_limit) > min_allocation_size) {
  julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
- temp_limit = align_size_down_(temp_limit, min_allocation_size);
+ temp_limit = align_down_(temp_limit, min_allocation_size);
  if (is_allocatable(temp_limit)) {
  lower_limit = temp_limit;
  } else {
@@ -1180,7 +1180,7 @@ jint os::Posix::set_minimum_stack_sizes() {
  JavaThread::stack_guard_zone_size() +
  JavaThread::stack_shadow_zone_size();
- _java_thread_min_stack_allowed = align_size_up(_java_thread_min_stack_allowed, vm_page_size());
+ _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
  _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
  size_t stack_size_in_bytes = ThreadStackSize * K;
@@ -1204,7 +1204,7 @@ jint os::Posix::set_minimum_stack_sizes() {
  JavaThread::stack_guard_zone_size() +
  JavaThread::stack_shadow_zone_size();
- _compiler_thread_min_stack_allowed = align_size_up(_compiler_thread_min_stack_allowed, vm_page_size());
+ _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
  _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
  stack_size_in_bytes = CompilerThreadStackSize * K;
@@ -1216,7 +1216,7 @@ jint os::Posix::set_minimum_stack_sizes() {
  return JNI_ERR;
  }
- _vm_internal_thread_min_stack_allowed = align_size_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
+ _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
  _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
  stack_size_in_bytes = VMThreadStackSize * K;
@@ -1276,9 +1276,9 @@ size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_s
  // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
  // Be careful not to round up to 0. Align down in that case.
  if (stack_size <= SIZE_MAX - vm_page_size()) {
- stack_size = align_size_up(stack_size, vm_page_size());
+ stack_size = align_up(stack_size, vm_page_size());
  } else {
- stack_size = align_size_down(stack_size, vm_page_size());
+ stack_size = align_down(stack_size, vm_page_size());
  }
  return stack_size;

View file

@@ -235,7 +235,7 @@ size_t os::current_stack_size() {
  }
  // base may not be page aligned
  address base = current_stack_base();
- address bottom = align_ptr_up(base - size, os::vm_page_size());;
+ address bottom = align_up(base - size, os::vm_page_size());;
  return (size_t)(base - bottom);
  }
@@ -1122,7 +1122,7 @@ void os::initialize_thread(Thread* thr) {
  if (current_size == 0) current_size = 2 * K * K;
  stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
  }
- address bottom = align_ptr_up(base - stack_size, os::vm_page_size());;
+ address bottom = align_up(base - stack_size, os::vm_page_size());;
  stack_size = (size_t)(base - bottom);
  assert(stack_size > 0, "Stack size calculation problem");
@@ -2331,12 +2331,12 @@ void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
  }
  size_t os::Solaris::page_size_for_alignment(size_t alignment) {
- assert(is_size_aligned(alignment, (size_t) vm_page_size()),
+ assert(is_aligned(alignment, (size_t) vm_page_size()),
  SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
  alignment, (size_t) vm_page_size());
  for (int i = 0; _page_sizes[i] != 0; i++) {
- if (is_size_aligned(alignment, _page_sizes[i])) {
+ if (is_aligned(alignment, _page_sizes[i])) {
  return _page_sizes[i];
  }
  }
@@ -2348,7 +2348,7 @@ int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
  size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0 && UseLargePages && alignment_hint > 0) {
- assert(is_size_aligned(bytes, alignment_hint),
+ assert(is_aligned(bytes, alignment_hint),
  SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);
  // The syscall memcntl requires an exact page size (see man memcntl for details).
@@ -2765,7 +2765,7 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
  }
  static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
- assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
+ assert(addr == (char*)align_down((uintptr_t)addr, os::vm_page_size()),
  "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
@@ -2902,9 +2902,9 @@ bool os::Solaris::is_valid_page_size(size_t bytes) {
  bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
- assert(is_ptr_aligned((void*) start, align),
+ assert(is_aligned((void*) start, align),
  PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
- assert(is_size_aligned(bytes, align),
+ assert(is_aligned(bytes, align),
  SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);
  // Signal to OS that we want large pages for addresses

View file

@@ -2386,7 +2386,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  bool pc_is_near_addr =
  (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
  bool instr_spans_page_boundary =
- (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+ (align_down((intptr_t) pc ^ (intptr_t) addr,
  (intptr_t) page_size) > 0);
  if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -2398,7 +2398,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  // Set memory to RWX and retry
- address page_start = align_ptr_down(addr, page_size);
+ address page_start = align_down(addr, page_size);
  bool res = os::protect_memory((char*) page_start, page_size,
  os::MEM_PROT_RWX);
@@ -2775,7 +2775,7 @@ static bool numa_interleaving_init() {
  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
- NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
+ NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
  if (numa_node_list_holder.build()) {
  if (log_is_enabled(Debug, os, cpu)) {
@@ -2832,12 +2832,12 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
- p_buf = align_ptr_up(p_buf, page_size);
+ p_buf = align_up(p_buf, page_size);
  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
- // An overflow of align_size_up() would have been caught above
+ // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();
@@ -2996,7 +2996,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  return NULL;
  }
  // Do manual alignment
- aligned_base = align_ptr_up(extra_base, alignment);
+ aligned_base = align_up(extra_base, alignment);
  os::release_memory(extra_base, extra_size);
@@ -3065,7 +3065,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
  bool exec) {
  assert(UseLargePages, "only for large pages");
- if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+ if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
  return NULL; // Fallback to small pages.
  }
@@ -4066,7 +4066,7 @@ jint os::init_2(void) {
  JavaThread::stack_shadow_zone_size() +
  (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
- min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
+ min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
  if (actual_reserve_size < min_stack_allowed) {
  tty->print_cr("\nThe Java thread stack size specified is too small. "

View file

@@ -718,7 +718,7 @@ JVM_handle_bsd_signal(int sig,
  bool pc_is_near_addr =
  (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
  bool instr_spans_page_boundary =
- (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+ (align_down((intptr_t) pc ^ (intptr_t) addr,
  (intptr_t) page_size) > 0);
  if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -730,7 +730,7 @@ JVM_handle_bsd_signal(int sig,
  (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  // Set memory to RWX and retry
- address page_start = align_ptr_down(addr, page_size);
+ address page_start = align_down(addr, page_size);
  bool res = os::protect_memory((char*) page_start, page_size,
  os::MEM_PROT_RWX);

View file

@@ -534,7 +534,7 @@ JVM_handle_linux_signal(int sig,
  bool pc_is_near_addr =
  (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
  bool instr_spans_page_boundary =
- (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+ (align_down((intptr_t) pc ^ (intptr_t) addr,
  (intptr_t) page_size) > 0);
  if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -546,7 +546,7 @@ JVM_handle_linux_signal(int sig,
  (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  // Set memory to RWX and retry
- address page_start = align_ptr_down(addr, page_size);
+ address page_start = align_down(addr, page_size);
  bool res = os::protect_memory((char*) page_start, page_size,
  os::MEM_PROT_RWX);

View file

@@ -350,7 +350,7 @@ static void current_stack_region(address *bottom, size_t *size) {
  if (res != 0) {
  fatal("pthread_attr_getguardsize failed with errno = %d", res);
  }
- int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
+ int guard_pages = align_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
  #ifdef IA64
@@ -361,7 +361,7 @@ static void current_stack_region(address *bottom, size_t *size) {
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
- int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
+ int total_pages = align_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
  #endif // IA64

View file

@@ -635,7 +635,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  bool pc_is_near_addr =
  (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
  bool instr_spans_page_boundary =
- (align_size_down((intptr_t) pc ^ (intptr_t) addr,
+ (align_down((intptr_t) pc ^ (intptr_t) addr,
  (intptr_t) page_size) > 0);
  if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
@@ -647,7 +647,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
  (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  // Make memory rwx and retry
- address page_start = align_ptr_down(addr, page_size);
+ address page_start = align_down(addr, page_size);
  bool res = os::protect_memory((char*) page_start, page_size,
  os::MEM_PROT_RWX);

View file

@@ -624,7 +624,7 @@ csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
  csize_t CodeBuffer::total_relocation_size() const {
  csize_t total = copy_relocations_to(NULL); // dry run only
- return (csize_t) align_size_up(total, HeapWordSize);
+ return (csize_t) align_up(total, HeapWordSize);
  }
  csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool only_inst) const {
@@ -769,7 +769,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
  CodeSection* dest_cs = dest->code_section(n);
  assert(cs->size() == dest_cs->size(), "sanity");
  csize_t usize = dest_cs->size();
- csize_t wsize = align_size_up(usize, HeapWordSize);
+ csize_t wsize = align_up(usize, HeapWordSize);
  assert(dest_cs->start() + wsize <= dest_end, "no overflow");
  // Copy the code as aligned machine words.
  // This may also include an uninitialized partial word at the end.

View file

@@ -227,7 +227,7 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }
- csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }
+ csize_t align_at_start(csize_t off) const { return (csize_t) align_up(off, alignment()); }
  // Mark a section frozen. Assign its remaining space to
  // the following section. It will never expand after this point.

View file

@ -3714,7 +3714,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
if ( fac->count[STATIC_DOUBLE] && if ( fac->count[STATIC_DOUBLE] &&
(Universe::field_type_should_be_aligned(T_DOUBLE) || (Universe::field_type_should_be_aligned(T_DOUBLE) ||
Universe::field_type_should_be_aligned(T_LONG)) ) { Universe::field_type_should_be_aligned(T_LONG)) ) {
next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong); next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
} }
int next_static_word_offset = next_static_double_offset + int next_static_word_offset = next_static_double_offset +
@ -3856,7 +3856,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
// long/double alignment. // long/double alignment.
if (nonstatic_double_count > 0) { if (nonstatic_double_count > 0) {
int offset = next_nonstatic_double_offset; int offset = next_nonstatic_double_offset;
next_nonstatic_double_offset = align_size_up(offset, BytesPerLong); next_nonstatic_double_offset = align_up(offset, BytesPerLong);
if (compact_fields && offset != next_nonstatic_double_offset) { if (compact_fields && offset != next_nonstatic_double_offset) {
// Allocate available fields into the gap before double field. // Allocate available fields into the gap before double field.
int length = next_nonstatic_double_offset - offset; int length = next_nonstatic_double_offset - offset;
@ -3906,7 +3906,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
if( allocation_style == 1 ) { if( allocation_style == 1 ) {
next_nonstatic_oop_offset = next_nonstatic_padded_offset; next_nonstatic_oop_offset = next_nonstatic_padded_offset;
if( nonstatic_oop_count > 0 ) { if( nonstatic_oop_count > 0 ) {
next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize); next_nonstatic_oop_offset = align_up(next_nonstatic_oop_offset, heapOopSize);
} }
next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize); next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
} }
@ -4061,31 +4061,31 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
switch (atype) { switch (atype) {
case NONSTATIC_BYTE: case NONSTATIC_BYTE:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1); next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, 1);
real_offset = next_nonstatic_padded_offset; real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += 1; next_nonstatic_padded_offset += 1;
break; break;
case NONSTATIC_SHORT: case NONSTATIC_SHORT:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort); next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerShort);
real_offset = next_nonstatic_padded_offset; real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerShort; next_nonstatic_padded_offset += BytesPerShort;
break; break;
case NONSTATIC_WORD: case NONSTATIC_WORD:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt); next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerInt);
real_offset = next_nonstatic_padded_offset; real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerInt; next_nonstatic_padded_offset += BytesPerInt;
break; break;
case NONSTATIC_DOUBLE: case NONSTATIC_DOUBLE:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong); next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, BytesPerLong);
real_offset = next_nonstatic_padded_offset; real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += BytesPerLong; next_nonstatic_padded_offset += BytesPerLong;
break; break;
case NONSTATIC_OOP: case NONSTATIC_OOP:
next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize); next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize);
real_offset = next_nonstatic_padded_offset; real_offset = next_nonstatic_padded_offset;
next_nonstatic_padded_offset += heapOopSize; next_nonstatic_padded_offset += heapOopSize;
@ -4147,9 +4147,9 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;
int nonstatic_fields_end = align_size_up(notaligned_nonstatic_fields_end, heapOopSize); int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, heapOopSize);
int instance_end = align_size_up(notaligned_nonstatic_fields_end, wordSize); int instance_end = align_up(notaligned_nonstatic_fields_end, wordSize);
int static_fields_end = align_size_up(next_static_byte_offset, wordSize); int static_fields_end = align_up(next_static_byte_offset, wordSize);
int static_field_size = (static_fields_end - int static_field_size = (static_fields_end -
InstanceMirrorKlass::offset_of_static_fields()) / wordSize; InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
@ -4158,7 +4158,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
int instance_size = align_object_size(instance_end / wordSize); int instance_size = align_object_size(instance_end / wordSize);
assert(instance_size == align_object_size(align_size_up( assert(instance_size == align_object_size(align_up(
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
wordSize) / wordSize), "consistent layout helper value"); wordSize) / wordSize), "consistent layout helper value");
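The hunks above only rename the helper; the layout rule is unchanged: before a long/double slot is assigned, its next offset is rounded up to the field's natural alignment. A minimal standalone sketch of that rounding, assuming the usual power-of-two mask arithmetic (the *_sketch helper is an illustrative stand-in, not the renamed HotSpot function):

// Sketch only, not the HotSpot definition; assumes 'alignment' is a power of two.
#include <cstdint>

constexpr uint32_t align_up_sketch(uint32_t offset, uint32_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

// An unaligned next offset of 12 is bumped to 16 before an 8-byte (BytesPerLong)
// double field is laid out; an already aligned offset is left alone.
static_assert(align_up_sketch(12, 8) == 16, "rounds up to the next 8-byte boundary");
static_assert(align_up_sketch(16, 8) == 16, "aligned offsets are unchanged");

int main() { return 0; }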
View file
@ -3815,7 +3815,7 @@ void JavaClasses::compute_hard_coded_offsets() {
// java_lang_boxing_object // java_lang_boxing_object
java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset + header; java_lang_boxing_object::value_offset = java_lang_boxing_object::hc_value_offset + header;
java_lang_boxing_object::long_value_offset = align_size_up((java_lang_boxing_object::hc_value_offset + header), BytesPerLong); java_lang_boxing_object::long_value_offset = align_up((java_lang_boxing_object::hc_value_offset + header), BytesPerLong);
// java_lang_ref_Reference: // java_lang_ref_Reference:
java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header; java_lang_ref_Reference::referent_offset = java_lang_ref_Reference::hc_referent_offset * x + header;
@ -3827,7 +3827,7 @@ void JavaClasses::compute_hard_coded_offsets() {
java_lang_ref_Reference::number_of_fake_oop_fields = 1; java_lang_ref_Reference::number_of_fake_oop_fields = 1;
// java_lang_ref_SoftReference Class // java_lang_ref_SoftReference Class
java_lang_ref_SoftReference::timestamp_offset = align_size_up((java_lang_ref_SoftReference::hc_timestamp_offset * x + header), BytesPerLong); java_lang_ref_SoftReference::timestamp_offset = align_up((java_lang_ref_SoftReference::hc_timestamp_offset * x + header), BytesPerLong);
// Don't multiply static fields because they are always in wordSize units // Don't multiply static fields because they are always in wordSize units
java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x; java_lang_ref_SoftReference::static_clock_offset = java_lang_ref_SoftReference::hc_static_clock_offset * x;
View file
@ -281,8 +281,8 @@ void CodeCache::initialize_heaps() {
// Align CodeHeaps // Align CodeHeaps
size_t alignment = heap_alignment(); size_t alignment = heap_alignment();
non_nmethod_size = align_size_up(non_nmethod_size, alignment); non_nmethod_size = align_up(non_nmethod_size, alignment);
profiled_size = align_size_down(profiled_size, alignment); profiled_size = align_down(profiled_size, alignment);
// Reserve one continuous chunk of memory for CodeHeaps and split it into // Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this: // parts for the individual heaps. The memory layout looks like this:
@ -322,7 +322,7 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
os::vm_page_size(); os::vm_page_size();
const size_t granularity = os::vm_allocation_granularity(); const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity); const size_t r_align = MAX2(page_size, granularity);
const size_t r_size = align_size_up(size, r_align); const size_t r_size = align_up(size, r_align);
const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 : const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(page_size, granularity); MAX2(page_size, granularity);
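reserve_heap_memory() above rounds the requested size up to the larger of the page size and the allocation granularity so the reservation is something the OS can actually map. A hedged sketch of that computation; the 4K/64K values are invented for illustration, not what os::vm_page_size() or os::vm_allocation_granularity() report on any given platform:

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_up_sketch(size_t x, size_t alignment) {  // power-of-two alignment assumed
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size   = 4 * 1024;    // hypothetical page size
  const size_t granularity = 64 * 1024;   // hypothetical allocation granularity
  const size_t r_align = std::max(page_size, granularity);
  const size_t r_size  = align_up_sketch(48 * 1024 * 1024 + 123, r_align);
  std::printf("reserve %zu bytes at %zu-byte alignment\n", r_size, r_align);
  return 0;
}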
View file
@ -612,16 +612,16 @@ ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set)
} }
int ImmutableOopMapBuilder::size_for(const OopMap* map) const { int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
return align_size_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8); return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
} }
int ImmutableOopMapBuilder::heap_size() { int ImmutableOopMapBuilder::heap_size() {
int base = sizeof(ImmutableOopMapSet); int base = sizeof(ImmutableOopMapSet);
base = align_size_up(base, 8); base = align_up(base, 8);
// all of ours pc / offset pairs // all of ours pc / offset pairs
int pairs = _set->size() * sizeof(ImmutableOopMapPair); int pairs = _set->size() * sizeof(ImmutableOopMapPair);
pairs = align_size_up(pairs, 8); pairs = align_up(pairs, 8);
for (int i = 0; i < _set->size(); ++i) { for (int i = 0; i < _set->size(); ++i) {
int size = 0; int size = 0;
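heap_size() above sums a header, the pc/offset pair array and each ImmutableOopMap, rounding every component up to 8 bytes so the pieces can be laid out back to back with natural alignment. A small sketch of that accumulation; the sizes below are invented and only the rounding pattern is the point:

#include <cstdio>

static int align_up_sketch(int x, int alignment) {  // power-of-two alignment assumed
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  // Invented sizes standing in for the set header, the pair array and the
  // per-map payloads; each component is padded to an 8-byte boundary.
  int total = align_up_sketch(12, 8);          // header
  total += align_up_sketch(5 * 8, 8);          // five pc/offset pairs
  const int map_data_sizes[] = { 10, 24, 3 };
  for (int sz : map_data_sizes) {
    total += align_up_sketch(16 + sz, 8);      // 16 stands in for the per-map header
  }
  std::printf("total = %d bytes\n", total);
  return 0;
}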
View file
@ -64,7 +64,7 @@ void CompactibleFreeListSpace::set_cms_values() {
// MinChunkSize should be a multiple of MinObjAlignment and be large enough // MinChunkSize should be a multiple of MinObjAlignment and be large enough
// for chunks to contain a FreeChunk. // for chunks to contain a FreeChunk.
size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes); size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
MinChunkSize = min_chunk_size_in_bytes / BytesPerWord; MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
assert(IndexSetStart == 0 && IndexSetStride == 0, "already set"); assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
@ -2873,8 +2873,7 @@ initialize_sequential_subtasks_for_marking(int n_threads,
if (span.contains(low)) { if (span.contains(low)) {
// Align low down to a card boundary so that // Align low down to a card boundary so that
// we can use block_offset_careful() on span boundaries. // we can use block_offset_careful() on span boundaries.
HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low, CardTableModRefBS::card_size); HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
// Clip span prefix at aligned_low // Clip span prefix at aligned_low
span = span.intersection(MemRegion(aligned_low, span.end())); span = span.intersection(MemRegion(aligned_low, span.end()));
} else if (low > span.end()) { } else if (low > span.end()) {
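The change above also shows why the pointer overload is convenient: the old call had to spell out the cast dance, (HeapWord*)align_size_down((uintptr_t)low, ...), while align_down accepts the pointer directly. A sketch of what such a pointer overload does, under the usual power-of-two assumption (illustrative only, not the align.hpp source; 64 merely stands in for the card size):

#include <cassert>
#include <cstdint>

template <typename T>
static inline T* align_down_sketch(T* p, uintptr_t alignment) {
  // Same mask arithmetic as the integral version, applied to the pointer's bits.
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) & ~(alignment - 1));
}

int main() {
  alignas(64) char card[64];
  char* low = card + 10;
  char* aligned_low = align_down_sketch(low, 64);   // round down to the "card" boundary
  assert(aligned_low == card);
  return 0;
}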
View file
@ -3219,7 +3219,7 @@ void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
if (sp->used_region().contains(_restart_addr)) { if (sp->used_region().contains(_restart_addr)) {
// Align down to a card boundary for the start of 0th task // Align down to a card boundary for the start of 0th task
// for this space. // for this space.
aligned_start = align_ptr_down(_restart_addr, CardTableModRefBS::card_size); aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
} }
size_t chunk_size = sp->marking_task_size(); size_t chunk_size = sp->marking_task_size();
View file
@ -287,7 +287,7 @@ HeapRegion* OldGCAllocRegion::release() {
// Determine how far we are from the next card boundary. If it is smaller than // Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card. // the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top(); HeapWord* top = cur->top();
HeapWord* aligned_top = align_ptr_up(top, BOTConstants::N_bytes); HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize); size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
View file
@ -425,7 +425,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) { size_t end_alignment_in_bytes) {
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(), assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
"alignment " SIZE_FORMAT " too large", end_alignment_in_bytes); "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize), assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
"alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize); "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
// If we've allocated nothing, simply return. // If we've allocated nothing, simply return.
@ -436,7 +436,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// If an end alignment was requested, insert filler objects. // If an end alignment was requested, insert filler objects.
if (end_alignment_in_bytes != 0) { if (end_alignment_in_bytes != 0) {
HeapWord* currtop = _allocation_region->top(); HeapWord* currtop = _allocation_region->top();
HeapWord* newtop = align_ptr_up(currtop, end_alignment_in_bytes); HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
size_t fill_size = pointer_delta(newtop, currtop); size_t fill_size = pointer_delta(newtop, currtop);
if (fill_size != 0) { if (fill_size != 0) {
if (fill_size < CollectedHeap::min_fill_size()) { if (fill_size < CollectedHeap::min_fill_size()) {
@ -445,8 +445,8 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// region boundary because the max supported alignment is smaller than the min // region boundary because the max supported alignment is smaller than the min
// region size, and because the allocation code never leaves space smaller than // region size, and because the allocation code never leaves space smaller than
// the min_fill_size at the top of the current allocation region. // the min_fill_size at the top of the current allocation region.
newtop = align_ptr_up(currtop + CollectedHeap::min_fill_size(), newtop = align_up(currtop + CollectedHeap::min_fill_size(),
end_alignment_in_bytes); end_alignment_in_bytes);
fill_size = pointer_delta(newtop, currtop); fill_size = pointer_delta(newtop, currtop);
} }
HeapWord* fill = archive_mem_allocate(fill_size); HeapWord* fill = archive_mem_allocate(fill_size);
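complete_archive() rounds the current top up to the requested end alignment and, when the resulting gap is smaller than the minimum filler object, rounds up from top + min_fill_size instead so a valid filler can be placed. A worked sketch of that gap computation on raw addresses; all constants are invented:

#include <cstdint>
#include <cstdio>

static inline uintptr_t align_up_sketch(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);  // power-of-two alignment assumed
}

int main() {
  const uintptr_t end_alignment = 4096;   // requested end alignment (illustrative)
  const uintptr_t min_fill      = 16;     // stand-in for the minimum filler size in bytes
  uintptr_t top = 0x10000FF8;             // pretend current allocation top

  uintptr_t newtop = align_up_sketch(top, end_alignment);
  uintptr_t fill   = newtop - top;        // 8 bytes: too small for a filler object
  if (fill != 0 && fill < min_fill) {
    newtop = align_up_sketch(top + min_fill, end_alignment);  // skip to the next boundary
    fill   = newtop - top;
  }
  std::printf("fill %zu bytes up to %#zx\n", (size_t)fill, (size_t)newtop);
  return 0;
}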
View file
@ -131,7 +131,7 @@ private:
void clear_card_bitmap_range(HeapWord* start, HeapWord* end) { void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start); BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size)); BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
_card_bm.clear_range(start_idx, end_idx); _card_bm.clear_range(start_idx, end_idx);
} }
@ -139,7 +139,7 @@ private:
// Mark the card liveness bitmap for the object spanning from start to end. // Mark the card liveness bitmap for the object spanning from start to end.
void mark_card_bitmap_range(HeapWord* start, HeapWord* end) { void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start); BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
BitMap::idx_t end_idx = card_live_bitmap_index_for(align_ptr_up(end, CardTableModRefBS::card_size)); BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
assert((end_idx - start_idx) > 0, "Trying to mark zero sized range."); assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
@ -423,7 +423,7 @@ public:
void G1CardLiveData::clear(WorkGang* workers) { void G1CardLiveData::clear(WorkGang* workers) {
guarantee(Universe::is_fully_initialized(), "Should not call this during initialization."); guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");
size_t const num_chunks = align_size_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size(); size_t const num_chunks = align_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers()); uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks); G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks);
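The num_chunks expression above is a ceiling division spelled with align_up: round the byte count up to a whole number of chunks, then divide. A sketch showing the equivalence for power-of-two chunk sizes (the chunk and bitmap sizes are made up):

#include <cassert>
#include <cstddef>

static size_t align_up_sketch(size_t x, size_t alignment) {  // power-of-two alignment assumed
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t chunk_size   = 1u << 20;                 // 1M-byte work chunks (illustrative)
  const size_t bitmap_bytes = 5 * chunk_size + 4242;    // pretend live-card bitmap size

  size_t num_chunks = align_up_sketch(bitmap_bytes, chunk_size) / chunk_size;
  assert(num_chunks == 6);                                              // ceil(5.004) == 6
  assert(num_chunks == (bitmap_bytes + chunk_size - 1) / chunk_size);   // same as ceiling division
  return 0;
}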
View file
@ -372,7 +372,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) { size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size); assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords; return align_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
} }
// If could fit into free regions w/o expansion, try. // If could fit into free regions w/o expansion, try.
@ -1606,7 +1606,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationConte
bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) { bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_size_up(aligned_expand_bytes, aligned_expand_bytes = align_up(aligned_expand_bytes,
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B", log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
@ -1647,7 +1647,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t aligned_shrink_bytes = size_t aligned_shrink_bytes =
ReservedSpace::page_align_size_down(shrink_bytes); ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes, aligned_shrink_bytes = align_down(aligned_shrink_bytes,
HeapRegion::GrainBytes); HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
@ -2435,7 +2435,7 @@ size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
// For G1 TLABs should not contain humongous objects, so the maximum TLAB size // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be equal to the humongous object limit. // must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const { size_t G1CollectedHeap::max_tlab_size() const {
return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment); return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
} }
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
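max_tlab_size() goes the other way: the humongous threshold is rounded down so the largest TLAB request stays a multiple of the object alignment. A sketch of the floor operation with invented word counts (the alignment of 2 words is only for illustration):

#include <cassert>
#include <cstddef>

static size_t align_down_sketch(size_t x, size_t alignment) {  // power-of-two alignment assumed
  return x & ~(alignment - 1);
}

int main() {
  const size_t min_obj_alignment_words = 2;      // stand-in for MinObjAlignment
  const size_t humongous_threshold     = 65537;  // pretend threshold in words (odd on purpose)
  size_t max_tlab_words = align_down_sketch(humongous_threshold, min_obj_alignment_words);
  assert(max_tlab_words == 65536 && max_tlab_words % min_obj_alignment_words == 0);
  return 0;
}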
View file
@ -71,7 +71,7 @@ G1CMBitMapRO::G1CMBitMapRO(int shifter) :
HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
const HeapWord* limit) const { const HeapWord* limit) const {
// First we must round addr *up* to a possible object boundary. // First we must round addr *up* to a possible object boundary.
addr = align_ptr_up(addr, HeapWordSize << _shifter); addr = align_up(addr, HeapWordSize << _shifter);
size_t addrOffset = heapWordToOffset(addr); size_t addrOffset = heapWordToOffset(addr);
assert(limit != NULL, "limit must not be NULL"); assert(limit != NULL, "limit must not be NULL");
size_t limitOffset = heapWordToOffset(limit); size_t limitOffset = heapWordToOffset(limit);
@ -170,8 +170,8 @@ bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry); size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
_max_chunk_capacity = align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar; _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
size_t initial_chunk_capacity = align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar; size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
guarantee(initial_chunk_capacity <= _max_chunk_capacity, guarantee(initial_chunk_capacity <= _max_chunk_capacity,
"Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT, "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
@ -714,7 +714,7 @@ void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool
assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint."); assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor(); size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size(); size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers()); uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
View file
@ -44,13 +44,13 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t
vmassert(_low_boundary == NULL, "VirtualSpace already initialized"); vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
vmassert(page_size > 0, "Page size must be non-zero."); vmassert(page_size > 0, "Page size must be non-zero.");
guarantee(is_ptr_aligned(rs.base(), page_size), guarantee(is_aligned(rs.base(), page_size),
"Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size); "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
guarantee(is_size_aligned(used_size, os::vm_page_size()), guarantee(is_aligned(used_size, os::vm_page_size()),
"Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size); "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
guarantee(used_size <= rs.size(), guarantee(used_size <= rs.size(),
"Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()); "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
guarantee(is_size_aligned(rs.size(), page_size), guarantee(is_aligned(rs.size(), page_size),
"Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size); "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
_low_boundary = rs.base(); _low_boundary = rs.base();
@ -141,7 +141,7 @@ void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pa
void G1PageBasedVirtualSpace::commit_tail() { void G1PageBasedVirtualSpace::commit_tail() {
vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here"); vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
char* const aligned_end_address = align_ptr_down(_high_boundary, _page_size); char* const aligned_end_address = align_down(_high_boundary, _page_size);
os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable, os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".", err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
p2i(aligned_end_address), p2i(_high_boundary), _tail_size)); p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
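This file shows the predicate half of the rename: is_ptr_aligned and is_size_aligned collapse into one is_aligned that is used for both rs.base() (a pointer) and used_size/rs.size() (sizes), and align_ptr_down becomes align_down. A sketch of how a single name can cover both call sites; illustrative only, not the align.hpp implementation:

#include <cassert>
#include <cstdint>

// One name for both kinds of arguments: integral values use the value itself,
// pointers use their address bits. Power-of-two alignment assumed.
static inline bool is_aligned_sketch(uintptr_t value, uintptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}
static inline bool is_aligned_sketch(const void* p, uintptr_t alignment) {
  return is_aligned_sketch(reinterpret_cast<uintptr_t>(p), alignment);
}

int main() {
  alignas(64) static char reserved[256];
  assert(is_aligned_sketch(reserved, 64));          // pointer call site (like rs.base())
  assert(is_aligned_sketch(sizeof(reserved), 64));  // size call site (like rs.size())
  return 0;
}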
View file
@ -100,7 +100,7 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
// Is the given page index the first after last page? // Is the given page index the first after last page?
bool is_after_last_page(size_t index) const; bool is_after_last_page(size_t index) const;
// Is the last page only partially covered by this space? // Is the last page only partially covered by this space?
bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); } bool is_last_page_partial() const { return !is_aligned(_high_boundary, _page_size); }
// Returns the end address of the given page bounded by the reserved space. // Returns the end address of the given page bounded by the reserved space.
char* bounded_end_addr(size_t end_page) const; char* bounded_end_addr(size_t end_page) const;
View file
@ -111,7 +111,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
_regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() { _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity"); guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
_refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size); _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
} }
virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) { virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
View file
@ -256,7 +256,7 @@ public:
return; return;
} }
size_t const num_chunks = align_size_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size(); size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers()); uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion; size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;
View file
@ -79,7 +79,7 @@ class PtrQueue VALUE_OBJ_CLASS_SPEC {
} }
static size_t byte_index_to_index(size_t ind) { static size_t byte_index_to_index(size_t ind) {
assert(is_size_aligned(ind, _element_size), "precondition"); assert(is_aligned(ind, _element_size), "precondition");
return ind / _element_size; return ind / _element_size;
} }
View file
@ -68,7 +68,7 @@ public:
static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); } static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); }
// Returns the size of the card array. // Returns the size of the card array.
static int cards_num() { static int cards_num() {
return align_size_up(G1RSetSparseRegionEntries, card_array_alignment); return align_up(G1RSetSparseRegionEntries, card_array_alignment);
} }
// Set the region_ind to the given value, and delete all cards. // Set the region_ind to the given value, and delete all cards.
View file
@ -161,7 +161,7 @@ void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
const size_t alignment = virtual_spaces()->alignment(); const size_t alignment = virtual_spaces()->alignment();
size_t change_in_bytes = MIN3(young_gen_available, size_t change_in_bytes = MIN3(young_gen_available,
old_gen_available, old_gen_available,
align_size_up_(expand_in_bytes, alignment)); align_up_(expand_in_bytes, alignment));
if (change_in_bytes == 0) { if (change_in_bytes == 0) {
return; return;
@ -203,7 +203,7 @@ bool AdjoiningGenerations::request_young_gen_expansion(size_t expand_in_bytes) {
const size_t alignment = virtual_spaces()->alignment(); const size_t alignment = virtual_spaces()->alignment();
size_t change_in_bytes = MIN3(young_gen_available, size_t change_in_bytes = MIN3(young_gen_available,
old_gen_available, old_gen_available,
align_size_up_(expand_in_bytes, alignment)); align_up_(expand_in_bytes, alignment));
if (change_in_bytes == 0) { if (change_in_bytes == 0) {
return false; return false;
View file
@ -91,7 +91,7 @@ size_t ASPSOldGen::available_for_expansion() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t result = gen_size_limit() - virtual_space()->committed_size(); size_t result = gen_size_limit() - virtual_space()->committed_size();
size_t result_aligned = align_size_down(result, heap->generation_alignment()); size_t result_aligned = align_down(result, heap->generation_alignment());
return result_aligned; return result_aligned;
} }
@ -106,7 +106,7 @@ size_t ASPSOldGen::available_for_contraction() {
PSAdaptiveSizePolicy* policy = heap->size_policy(); PSAdaptiveSizePolicy* policy = heap->size_policy();
const size_t working_size = const size_t working_size =
used_in_bytes() + (size_t) policy->avg_promoted()->padded_average(); used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
const size_t working_aligned = align_size_up(working_size, gen_alignment); const size_t working_aligned = align_up(working_size, gen_alignment);
const size_t working_or_min = MAX2(working_aligned, min_gen_size()); const size_t working_or_min = MAX2(working_aligned, min_gen_size());
if (working_or_min > reserved().byte_size()) { if (working_or_min > reserved().byte_size()) {
// If the used or minimum gen size (aligned up) is greater // If the used or minimum gen size (aligned up) is greater
@ -124,7 +124,7 @@ size_t ASPSOldGen::available_for_contraction() {
size_t result = policy->promo_increment_aligned_down(max_contraction); size_t result = policy->promo_increment_aligned_down(max_contraction);
// Also adjust for inter-generational alignment // Also adjust for inter-generational alignment
size_t result_aligned = align_size_down(result, gen_alignment); size_t result_aligned = align_down(result, gen_alignment);
Log(gc, ergo) log; Log(gc, ergo) log;
if (log.is_trace()) { if (log.is_trace()) {
View file
@ -75,7 +75,7 @@ size_t ASPSYoungGen::available_for_expansion() {
"generation size limit is wrong"); "generation size limit is wrong");
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
size_t result = gen_size_limit() - current_committed_size; size_t result = gen_size_limit() - current_committed_size;
size_t result_aligned = align_size_down(result, heap->generation_alignment()); size_t result_aligned = align_down(result, heap->generation_alignment());
return result_aligned; return result_aligned;
} }
@ -98,7 +98,7 @@ size_t ASPSYoungGen::available_for_contraction() {
assert(eden_space()->capacity_in_bytes() >= eden_alignment, assert(eden_space()->capacity_in_bytes() >= eden_alignment,
"Alignment is wrong"); "Alignment is wrong");
size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment; size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
eden_avail = align_size_down(eden_avail, gen_alignment); eden_avail = align_down(eden_avail, gen_alignment);
assert(virtual_space()->committed_size() >= min_gen_size(), assert(virtual_space()->committed_size() >= min_gen_size(),
"minimum gen size is wrong"); "minimum gen size is wrong");
@ -110,7 +110,7 @@ size_t ASPSYoungGen::available_for_contraction() {
// for reasons the "increment" fraction is used. // for reasons the "increment" fraction is used.
PSAdaptiveSizePolicy* policy = heap->size_policy(); PSAdaptiveSizePolicy* policy = heap->size_policy();
size_t result = policy->eden_increment_aligned_down(max_contraction); size_t result = policy->eden_increment_aligned_down(max_contraction);
size_t result_aligned = align_size_down(result, gen_alignment); size_t result_aligned = align_down(result, gen_alignment);
log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K); log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
log_trace(gc, ergo)(" max_contraction " SIZE_FORMAT " K", max_contraction/K); log_trace(gc, ergo)(" max_contraction " SIZE_FORMAT " K", max_contraction/K);
@ -166,7 +166,7 @@ bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// Adjust new generation size // Adjust new generation size
const size_t eden_plus_survivors = const size_t eden_plus_survivors =
align_size_up(eden_size + 2 * survivor_size, alignment); align_up(eden_size + 2 * survivor_size, alignment);
size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()), size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
min_gen_size()); min_gen_size());
assert(desired_size <= gen_size_limit(), "just checking"); assert(desired_size <= gen_size_limit(), "just checking");
@ -332,7 +332,7 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
if (from_size == 0) { if (from_size == 0) {
from_size = alignment; from_size = alignment;
} else { } else {
from_size = align_size_up(from_size, alignment); from_size = align_up(from_size, alignment);
} }
from_end = from_start + from_size; from_end = from_start + from_size;
@ -419,9 +419,9 @@ void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
"from start moved to the right"); "from start moved to the right");
guarantee((HeapWord*)from_end >= from_space()->top(), guarantee((HeapWord*)from_end >= from_space()->top(),
"from end moved into live data"); "from end moved into live data");
assert(is_ptr_object_aligned(eden_start), "checking alignment"); assert(is_object_aligned(eden_start), "checking alignment");
assert(is_ptr_object_aligned(from_start), "checking alignment"); assert(is_object_aligned(from_start), "checking alignment");
assert(is_ptr_object_aligned(to_start), "checking alignment"); assert(is_object_aligned(to_start), "checking alignment");
MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end); MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
View file
@ -504,14 +504,14 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
} }
#ifdef ASSERT #ifdef ASSERT
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
assert(cur_committed.start() == align_ptr_up(cur_committed.start(), os::vm_page_size()), assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
"Starts should have proper alignment"); "Starts should have proper alignment");
#endif #endif
jbyte* new_start = byte_for(new_region.start()); jbyte* new_start = byte_for(new_region.start());
// Round down because this is for the start address // Round down because this is for the start address
HeapWord* new_start_aligned = HeapWord* new_start_aligned =
(HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size()); (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
// The guard page is always committed and should not be committed over. // The guard page is always committed and should not be committed over.
// This method is used in cases where the generation is growing toward // This method is used in cases where the generation is growing toward
// lower addresses but the guard region is still at the end of the // lower addresses but the guard region is still at the end of the
@ -584,7 +584,7 @@ void CardTableExtension::resize_update_committed_table(int changed_region,
jbyte* new_start = byte_for(new_region.start()); jbyte* new_start = byte_for(new_region.start());
// Set the new start of the committed region // Set the new start of the committed region
HeapWord* new_start_aligned = HeapWord* new_start_aligned =
(HeapWord*)align_ptr_down(new_start, os::vm_page_size()); (HeapWord*)align_down(new_start, os::vm_page_size());
MemRegion new_committed = MemRegion(new_start_aligned, MemRegion new_committed = MemRegion(new_start_aligned,
_committed[changed_region].end()); _committed[changed_region].end());
_committed[changed_region] = new_committed; _committed[changed_region] = new_committed;
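Note that this file ends up with both spellings: one site keeps the explicit cast, (HeapWord*)align_down((uintptr_t)new_start, ...), while the other calls the pointer form directly. Both yield the same address; a quick sketch asserting that equivalence (64 stands in for os::vm_page_size() purely to keep the example small):

#include <cassert>
#include <cstdint>

static inline uintptr_t align_down_sketch(uintptr_t x, uintptr_t alignment) {
  return x & ~(alignment - 1);                      // power-of-two alignment assumed
}
template <typename T>
static inline T* align_down_sketch(T* p, uintptr_t alignment) {
  return reinterpret_cast<T*>(align_down_sketch(reinterpret_cast<uintptr_t>(p), alignment));
}

int main() {
  const uintptr_t page = 64;                        // illustrative page size
  alignas(64) static char table[256];
  char* new_start = table + 100;                    // some interior card-table byte

  char* a = (char*)align_down_sketch((uintptr_t)new_start, page);  // the cast spelling
  char* b = align_down_sketch(new_start, page);                    // the pointer overload
  assert(a == b && a == table + 64);
  return 0;
}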
View file
@ -177,7 +177,7 @@ HeapWord* MutableSpace::allocate(size_t size) {
if (pointer_delta(end(), obj) >= size) { if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
set_top(new_top); set_top(new_top);
assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top), assert(is_object_aligned(obj) && is_object_aligned(new_top),
"checking alignment"); "checking alignment");
return obj; return obj;
} else { } else {
@ -198,7 +198,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) {
if (result != obj) { if (result != obj) {
continue; // another thread beat us to the allocation, try again continue; // another thread beat us to the allocation, try again
} }
assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top), assert(is_object_aligned(obj) && is_object_aligned(new_top),
"checking alignment"); "checking alignment");
return obj; return obj;
} else { } else {
View file
@ -43,7 +43,7 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
assert(bytes_to_reserve > 0, "Sanity"); assert(bytes_to_reserve > 0, "Sanity");
bytes_to_reserve = bytes_to_reserve =
align_size_up(bytes_to_reserve, os::vm_allocation_granularity()); align_up(bytes_to_reserve, os::vm_allocation_granularity());
// Do not use large-pages for the backing store. The one large page region // Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper. // will be used for the heap proper.
@ -89,7 +89,7 @@ void ObjectStartArray::set_covered_region(MemRegion mr) {
// Only commit memory in page sized chunks // Only commit memory in page sized chunks
requested_blocks_size_in_bytes = requested_blocks_size_in_bytes =
align_size_up(requested_blocks_size_in_bytes, os::vm_page_size()); align_up(requested_blocks_size_in_bytes, os::vm_page_size());
_covered_region = mr; _covered_region = mr;
View file
@ -44,7 +44,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t raw_bytes = words * sizeof(idx_t); const size_t raw_bytes = words * sizeof(idx_t);
const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10); const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity(); const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity)); _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity); MAX2(page_sz, granularity);
View file
@ -370,10 +370,10 @@ void PSAdaptiveSizePolicy::compute_eden_space_size(
} }
// Align everything and make a final limit check // Align everything and make a final limit check
desired_eden_size = align_size_up(desired_eden_size, _space_alignment); desired_eden_size = align_up(desired_eden_size, _space_alignment);
desired_eden_size = MAX2(desired_eden_size, _space_alignment); desired_eden_size = MAX2(desired_eden_size, _space_alignment);
eden_limit = align_size_down(eden_limit, _space_alignment); eden_limit = align_down(eden_limit, _space_alignment);
// And one last limit check, now that we've aligned things. // And one last limit check, now that we've aligned things.
if (desired_eden_size > eden_limit) { if (desired_eden_size > eden_limit) {
@ -547,10 +547,10 @@ void PSAdaptiveSizePolicy::compute_old_gen_free_space(
} }
// Align everything and make a final limit check // Align everything and make a final limit check
desired_promo_size = align_size_up(desired_promo_size, _space_alignment); desired_promo_size = align_up(desired_promo_size, _space_alignment);
desired_promo_size = MAX2(desired_promo_size, _space_alignment); desired_promo_size = MAX2(desired_promo_size, _space_alignment);
promo_limit = align_size_down(promo_limit, _space_alignment); promo_limit = align_down(promo_limit, _space_alignment);
// And one last limit check, now that we've aligned things. // And one last limit check, now that we've aligned things.
desired_promo_size = MIN2(desired_promo_size, promo_limit); desired_promo_size = MIN2(desired_promo_size, promo_limit);
@ -925,24 +925,24 @@ size_t PSAdaptiveSizePolicy::eden_increment(size_t cur_eden) {
size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) { size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement); size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement);
return align_size_up(result, _space_alignment); return align_up(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) { size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) {
size_t result = eden_increment(cur_eden); size_t result = eden_increment(cur_eden);
return align_size_down(result, _space_alignment); return align_down(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up( size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up(
size_t cur_eden) { size_t cur_eden) {
size_t result = eden_increment(cur_eden, size_t result = eden_increment(cur_eden,
YoungGenerationSizeIncrement + _young_gen_size_increment_supplement); YoungGenerationSizeIncrement + _young_gen_size_increment_supplement);
return align_size_up(result, _space_alignment); return align_up(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) { size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) {
size_t eden_heap_delta = eden_decrement(cur_eden); size_t eden_heap_delta = eden_decrement(cur_eden);
return align_size_down(eden_heap_delta, _space_alignment); return align_down(eden_heap_delta, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) { size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
@ -964,24 +964,24 @@ size_t PSAdaptiveSizePolicy::promo_increment(size_t cur_promo) {
size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) { size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
return align_size_up(result, _space_alignment); return align_up(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) { size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) {
size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement);
return align_size_down(result, _space_alignment); return align_down(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up( size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up(
size_t cur_promo) { size_t cur_promo) {
size_t result = promo_increment(cur_promo, size_t result = promo_increment(cur_promo,
TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement); TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement);
return align_size_up(result, _space_alignment); return align_up(result, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) { size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) {
size_t promo_heap_delta = promo_decrement(cur_promo); size_t promo_heap_delta = promo_decrement(cur_promo);
return align_size_down(promo_heap_delta, _space_alignment); return align_down(promo_heap_delta, _space_alignment);
} }
size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) { size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
@ -996,7 +996,7 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
size_t survivor_limit) { size_t survivor_limit) {
assert(survivor_limit >= _space_alignment, assert(survivor_limit >= _space_alignment,
"survivor_limit too small"); "survivor_limit too small");
assert(is_size_aligned(survivor_limit, _space_alignment), assert(is_aligned(survivor_limit, _space_alignment),
"survivor_limit not aligned"); "survivor_limit not aligned");
// This method is called even if the tenuring threshold and survivor // This method is called even if the tenuring threshold and survivor
@ -1059,7 +1059,7 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
// we use this to see how good of an estimate we have of what survived. // we use this to see how good of an estimate we have of what survived.
// We're trying to pad the survivor size as little as possible without // We're trying to pad the survivor size as little as possible without
// overflowing the survivor spaces. // overflowing the survivor spaces.
size_t target_size = align_size_up((size_t)_avg_survived->padded_average(), size_t target_size = align_up((size_t)_avg_survived->padded_average(),
_space_alignment); _space_alignment);
target_size = MAX2(target_size, _space_alignment); target_size = MAX2(target_size, _space_alignment);
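The survivor target above pads a floating-point survival average, rounds it up to the space alignment and never lets it drop below one alignment unit. A sketch of that clamping with invented numbers:

#include <cstddef>
#include <cstdio>

static size_t align_up_sketch(size_t x, size_t alignment) {   // power-of-two alignment assumed
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t space_alignment = 64 * 1024;    // illustrative stand-in for _space_alignment
  const double padded_average  = 1234567.8;    // pretend padded survival average in bytes

  size_t target = align_up_sketch((size_t)padded_average, space_alignment);
  if (target < space_alignment) target = space_alignment;     // the MAX2(...) clamp above
  std::printf("survivor target = %zu bytes\n", target);       // 1245184, i.e. 19 * 64K
  return 0;
}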
View file
@ -374,7 +374,7 @@ class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
// large filler object at the bottom). // large filler object at the bottom).
const size_t sz = gen_size / MinSurvivorRatio; const size_t sz = gen_size / MinSurvivorRatio;
const size_t alignment = _space_alignment; const size_t alignment = _space_alignment;
return sz > alignment ? align_size_down(sz, alignment) : alignment; return sz > alignment ? align_down(sz, alignment) : alignment;
} }
size_t live_at_last_full_gc() { size_t live_at_last_full_gc() {
View file
@ -410,7 +410,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
const size_t alignment = old_gen->virtual_space()->alignment(); const size_t alignment = old_gen->virtual_space()->alignment();
const size_t eden_used = eden_space->used_in_bytes(); const size_t eden_used = eden_space->used_in_bytes();
const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average(); const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
const size_t absorb_size = align_size_up(eden_used + promoted, alignment); const size_t absorb_size = align_up(eden_used + promoted, alignment);
const size_t eden_capacity = eden_space->capacity_in_bytes(); const size_t eden_capacity = eden_space->capacity_in_bytes();
if (absorb_size >= eden_capacity) { if (absorb_size >= eden_capacity) {
View file
@ -229,8 +229,8 @@ void PSOldGen::expand(size_t bytes) {
} }
MutexLocker x(ExpandHeap_lock); MutexLocker x(ExpandHeap_lock);
const size_t alignment = virtual_space()->alignment(); const size_t alignment = virtual_space()->alignment();
size_t aligned_bytes = align_size_up(bytes, alignment); size_t aligned_bytes = align_up(bytes, alignment);
size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
if (UseNUMA) { if (UseNUMA) {
// With NUMA we use round-robin page allocation for the old gen. Expand by at least // With NUMA we use round-robin page allocation for the old gen. Expand by at least
@ -244,7 +244,7 @@ void PSOldGen::expand(size_t bytes) {
// but not a guarantee. Align down to give a best effort. This is likely // but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to // the most that the generation can expand since it has some capacity to
// start with. // start with.
aligned_bytes = align_size_down(bytes, alignment); aligned_bytes = align_down(bytes, alignment);
} }
bool success = false; bool success = false;
@ -318,7 +318,7 @@ void PSOldGen::shrink(size_t bytes) {
assert_lock_strong(ExpandHeap_lock); assert_lock_strong(ExpandHeap_lock);
assert_locked_or_safepoint(Heap_lock); assert_locked_or_safepoint(Heap_lock);
size_t size = align_size_down(bytes, virtual_space()->alignment()); size_t size = align_down(bytes, virtual_space()->alignment());
if (size > 0) { if (size > 0) {
assert_lock_strong(ExpandHeap_lock); assert_lock_strong(ExpandHeap_lock);
virtual_space()->shrink_by(bytes); virtual_space()->shrink_by(bytes);
@ -343,7 +343,7 @@ void PSOldGen::resize(size_t desired_free_space) {
new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size()); new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?"); assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
new_size = align_size_up(new_size, alignment); new_size = align_up(new_size, alignment);
const size_t current_size = capacity_in_bytes(); const size_t current_size = capacity_in_bytes();
View file
@ -432,7 +432,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t raw_bytes = count * element_size; const size_t raw_bytes = count * element_size;
const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10); const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity(); const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity)); _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 : const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity); MAX2(page_sz, granularity);
@ -1984,7 +1984,7 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
const size_t alignment = old_gen->virtual_space()->alignment(); const size_t alignment = old_gen->virtual_space()->alignment();
const size_t eden_used = eden_space->used_in_bytes(); const size_t eden_used = eden_space->used_in_bytes();
const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average(); const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
const size_t absorb_size = align_size_up(eden_used + promoted, alignment); const size_t absorb_size = align_up(eden_used + promoted, alignment);
const size_t eden_capacity = eden_space->capacity_in_bytes(); const size_t eden_capacity = eden_space->capacity_in_bytes();
if (absorb_size >= eden_capacity) { if (absorb_size >= eden_capacity) {
View file
@ -88,7 +88,7 @@ inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) { inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr), assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
"must move left or to a different space"); "must move left or to a different space");
assert(is_ptr_object_aligned(old_addr) && is_ptr_object_aligned(new_addr), assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
"checking alignment"); "checking alignment");
} }
#endif // ASSERT #endif // ASSERT
View file
@ -122,7 +122,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
// The 'new_top>obj' check is needed to detect overflow of obj+size. // The 'new_top>obj' check is needed to detect overflow of obj+size.
if (new_top > obj && new_top <= end()) { if (new_top > obj && new_top <= end()) {
set_top(new_top); set_top(new_top);
assert(is_ptr_object_aligned(obj) && is_ptr_object_aligned(new_top), assert(is_object_aligned(obj) && is_object_aligned(new_top),
"checking alignment"); "checking alignment");
_start_array->allocate_block(obj); _start_array->allocate_block(obj);
return obj; return obj;
View file
@ -40,7 +40,7 @@ HeapWord* PSYoungPromotionLAB::allocate(size_t size) {
// The 'new_top>obj' check is needed to detect overflow of obj+size. // The 'new_top>obj' check is needed to detect overflow of obj+size.
if (new_top > obj && new_top <= end()) { if (new_top > obj && new_top <= end()) {
set_top(new_top); set_top(new_top);
assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_ptr_object_aligned(new_top), assert(is_aligned(obj, SurvivorAlignmentInBytes) && is_object_aligned(new_top),
"checking alignment"); "checking alignment");
return obj; return obj;
} else { } else {
View file
@ -116,7 +116,7 @@ void PSYoungGen::initialize_work() {
// round the survivor space size down to the nearest alignment // round the survivor space size down to the nearest alignment
// and make sure its size is greater than 0. // and make sure its size is greater than 0.
max_survivor_size = align_size_down(max_survivor_size, alignment); max_survivor_size = align_down(max_survivor_size, alignment);
max_survivor_size = MAX2(max_survivor_size, alignment); max_survivor_size = MAX2(max_survivor_size, alignment);
// set the maximum size of eden to be the size of the young gen // set the maximum size of eden to be the size of the young gen
@ -128,7 +128,7 @@ void PSYoungGen::initialize_work() {
// round the survivor space size down to the nearest alignment // round the survivor space size down to the nearest alignment
// and make sure its size is greater than 0. // and make sure its size is greater than 0.
max_survivor_size = align_size_down(max_survivor_size, alignment); max_survivor_size = align_down(max_survivor_size, alignment);
max_survivor_size = MAX2(max_survivor_size, alignment); max_survivor_size = MAX2(max_survivor_size, alignment);
// set the maximum size of eden to be the size of the young gen // set the maximum size of eden to be the size of the young gen
@ -162,7 +162,7 @@ void PSYoungGen::compute_initial_space_boundaries() {
assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors"); assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
size_t survivor_size = size / InitialSurvivorRatio; size_t survivor_size = size / InitialSurvivorRatio;
survivor_size = align_size_down(survivor_size, alignment); survivor_size = align_down(survivor_size, alignment);
// ... but never less than an alignment // ... but never less than an alignment
survivor_size = MAX2(survivor_size, alignment); survivor_size = MAX2(survivor_size, alignment);
@ -193,9 +193,9 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
char *from_end = from_start + survivor_size; char *from_end = from_start + survivor_size;
assert(from_end == virtual_space()->high(), "just checking"); assert(from_end == virtual_space()->high(), "just checking");
assert(is_ptr_object_aligned(eden_start), "checking alignment"); assert(is_object_aligned(eden_start), "checking alignment");
assert(is_ptr_object_aligned(to_start), "checking alignment"); assert(is_object_aligned(to_start), "checking alignment");
assert(is_ptr_object_aligned(from_start), "checking alignment"); assert(is_object_aligned(from_start), "checking alignment");
MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start); MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start); MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
@ -294,7 +294,7 @@ bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
// Adjust new generation size // Adjust new generation size
const size_t eden_plus_survivors = const size_t eden_plus_survivors =
align_size_up(eden_size + 2 * survivor_size, alignment); align_up(eden_size + 2 * survivor_size, alignment);
size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()), size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
min_gen_size()); min_gen_size());
assert(desired_size <= max_size(), "just checking"); assert(desired_size <= max_size(), "just checking");
@ -528,7 +528,7 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
if (from_size == 0) { if (from_size == 0) {
from_size = alignment; from_size = alignment;
} else { } else {
from_size = align_size_up(from_size, alignment); from_size = align_up(from_size, alignment);
} }
from_end = from_start + from_size; from_end = from_start + from_size;
@ -611,9 +611,9 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
"from start moved to the right"); "from start moved to the right");
guarantee((HeapWord*)from_end >= from_space()->top(), guarantee((HeapWord*)from_end >= from_space()->top(),
"from end moved into live data"); "from end moved into live data");
assert(is_ptr_object_aligned(eden_start), "checking alignment"); assert(is_object_aligned(eden_start), "checking alignment");
assert(is_ptr_object_aligned(from_start), "checking alignment"); assert(is_object_aligned(from_start), "checking alignment");
assert(is_ptr_object_aligned(to_start), "checking alignment"); assert(is_object_aligned(to_start), "checking alignment");
MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end); MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
@ -815,7 +815,7 @@ size_t PSYoungGen::available_to_live() {
} }
size_t delta_in_bytes = unused_committed + delta_in_survivor; size_t delta_in_bytes = unused_committed + delta_in_survivor;
delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment); delta_in_bytes = align_down(delta_in_bytes, gen_alignment);
return delta_in_bytes; return delta_in_bytes;
} }
@ -828,7 +828,7 @@ size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
// Allow shrinkage into the current eden but keep eden large enough // Allow shrinkage into the current eden but keep eden large enough
// to maintain the minimum young gen size // to maintain the minimum young gen size
bytes = MIN3(bytes, available_to_min_gen(), available_to_live()); bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
return align_size_down(bytes, virtual_space()->alignment()); return align_down(bytes, virtual_space()->alignment());
} }
void PSYoungGen::reset_after_change() { void PSYoungGen::reset_after_change() {

View file

@ -257,10 +257,10 @@ void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
if (eden_size < minimum_eden_size) { if (eden_size < minimum_eden_size) {
// May happen due to 64Kb rounding, if so adjust eden size back up // May happen due to 64Kb rounding, if so adjust eden size back up
minimum_eden_size = align_size_up(minimum_eden_size, alignment); minimum_eden_size = align_up(minimum_eden_size, alignment);
uintx maximum_survivor_size = (size - minimum_eden_size) / 2; uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
uintx unaligned_survivor_size = uintx unaligned_survivor_size =
align_size_down(maximum_survivor_size, alignment); align_down(maximum_survivor_size, alignment);
survivor_size = MAX2(unaligned_survivor_size, alignment); survivor_size = MAX2(unaligned_survivor_size, alignment);
eden_size = size - (2*survivor_size); eden_size = size - (2*survivor_size);
assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
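
The re-carve above rounds the minimum eden up, splits the remainder into two survivor spaces rounded down, and keeps at least one aligned unit per survivor. A sketch with hypothetical numbers (8M young gen, 64K space alignment, 6M minimum eden); the helper bodies are assumptions:

#include <cstddef>

constexpr size_t K = 1024, M = 1024 * K;
constexpr size_t align_up(size_t s, size_t a)   { return (s + a - 1) & ~(a - 1); }
constexpr size_t align_down(size_t s, size_t a) { return s & ~(a - 1); }
constexpr size_t max2(size_t a, size_t b)       { return a > b ? a : b; }

constexpr size_t size              = 8 * M;                        // young gen, assumed
constexpr size_t alignment         = 64 * K;                       // space alignment, assumed
constexpr size_t minimum_eden_size = align_up(6 * M, alignment);   // 6M, already aligned
constexpr size_t survivor_size     = max2(align_down((size - minimum_eden_size) / 2, alignment),
                                          alignment);              // 1M
constexpr size_t eden_size         = size - 2 * survivor_size;     // 6M

static_assert(eden_size >= minimum_eden_size && survivor_size <= eden_size, "just checking");
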
@ -386,10 +386,10 @@ size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
if (new_size_candidate <= max_uintx - thread_increase_size) { if (new_size_candidate <= max_uintx - thread_increase_size) {
new_size_candidate += thread_increase_size; new_size_candidate += thread_increase_size;
// 3. Check an overflow at 'align_size_up'. // 3. Check an overflow at 'align_up'.
size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1)); size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
if (new_size_candidate <= aligned_max) { if (new_size_candidate <= aligned_max) {
desired_new_size = align_size_up(new_size_candidate, alignment); desired_new_size = align_up(new_size_candidate, alignment);
} }
} }
} }
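
The guard above exists because align_up(x, a) adds up to a - 1 before masking and can wrap when x is close to max_uintx. A sketch of the same check in isolation; the values are illustrative:

#include <cstdint>
#include <limits>

constexpr uintptr_t max_uintx = std::numeric_limits<uintptr_t>::max();

constexpr uintptr_t align_up(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

// Largest value whose align_up result cannot wrap around zero.
constexpr uintptr_t aligned_max(uintptr_t alignment) {
  return (max_uintx - alignment) & ~(alignment - 1);
}
constexpr bool safe_to_align_up(uintptr_t candidate, uintptr_t alignment) {
  return candidate <= aligned_max(alignment);
}

static_assert(safe_to_align_up(1024, 64),           "ordinary sizes are safe");
static_assert(!safe_to_align_up(max_uintx - 1, 64), "near-max sizes would overflow");
static_assert(align_up(1024, 64) == 1024,           "already-aligned values are unchanged");
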

View file

@ -143,7 +143,7 @@ protected:
// gen_size. // gen_size.
size_t compute_survivor_size(size_t gen_size, size_t alignment) const { size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
size_t n = gen_size / (SurvivorRatio + 2); size_t n = gen_size / (SurvivorRatio + 2);
return n > alignment ? align_size_down(n, alignment) : alignment; return n > alignment ? align_down(n, alignment) : alignment;
} }
public: // was "protected" but caused compile error on win32 public: // was "protected" but caused compile error on win32
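
compute_survivor_size divides the generation by SurvivorRatio + 2 (two survivor spaces plus eden's share) and never returns less than one aligned unit. A sketch with hypothetical inputs (SurvivorRatio = 8, 64K space alignment); the helper body is an assumption:

#include <cstddef>

constexpr size_t K = 1024, M = 1024 * K;
constexpr size_t align_down(size_t s, size_t a) { return s & ~(a - 1); }

constexpr size_t compute_survivor_size(size_t gen_size, size_t alignment, size_t ratio) {
  return gen_size / (ratio + 2) > alignment
       ? align_down(gen_size / (ratio + 2), alignment)
       : alignment;
}

static_assert(compute_survivor_size(10 * M, 64 * K, 8) == 1 * M,   "large gens scale by the ratio");
static_assert(compute_survivor_size(320 * K, 64 * K, 8) == 64 * K, "never below one aligned unit");
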

View file

@ -73,8 +73,8 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
// interface, so it is "exactly precise" (if i may be allowed the adverbial // interface, so it is "exactly precise" (if i may be allowed the adverbial
// redundancy for emphasis) and does not include narrow oop slots not // redundancy for emphasis) and does not include narrow oop slots not
// included in the original write interval. // included in the original write interval.
HeapWord* aligned_start = align_ptr_down(start, HeapWordSize); HeapWord* aligned_start = align_down(start, HeapWordSize);
HeapWord* aligned_end = align_ptr_up (end, HeapWordSize); HeapWord* aligned_end = align_up (end, HeapWordSize);
// If compressed oops were not being used, these should already be aligned // If compressed oops were not being used, these should already be aligned
assert(UseCompressedOops || (aligned_start == start && aligned_end == end), assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
"Expected heap word alignment of start and end"); "Expected heap word alignment of start and end");

View file

@ -42,7 +42,7 @@ size_t CardTableModRefBS::compute_byte_map_size()
"uninitialized, check declaration order"); "uninitialized, check declaration order");
assert(_page_size != 0, "uninitialized, check declaration order"); assert(_page_size != 0, "uninitialized, check declaration order");
const size_t granularity = os::vm_allocation_granularity(); const size_t granularity = os::vm_allocation_granularity();
return align_size_up(_guard_index + 1, MAX2(_page_size, granularity)); return align_up(_guard_index + 1, MAX2(_page_size, granularity));
} }
CardTableModRefBS::CardTableModRefBS( CardTableModRefBS::CardTableModRefBS(
@ -110,7 +110,7 @@ void CardTableModRefBS::initialize() {
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
jbyte* guard_card = &_byte_map[_guard_index]; jbyte* guard_card = &_byte_map[_guard_index];
uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
_guard_region = MemRegion((HeapWord*)guard_page, _page_size); _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
!ExecMem, "card table last card"); !ExecMem, "card table last card");
@ -152,7 +152,7 @@ int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
_covered[res].set_start(base); _covered[res].set_start(base);
_covered[res].set_word_size(0); _covered[res].set_word_size(0);
jbyte* ct_start = byte_for(base); jbyte* ct_start = byte_for(base);
uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size); uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
_committed[res].set_start((HeapWord*)ct_start_aligned); _committed[res].set_start((HeapWord*)ct_start_aligned);
_committed[res].set_word_size(0); _committed[res].set_word_size(0);
return res; return res;
@ -212,7 +212,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
} }
// Align the end up to a page size (starts are already aligned). // Align the end up to a page size (starts are already aligned).
jbyte* const new_end = byte_after(new_region.last()); jbyte* const new_end = byte_after(new_region.last());
HeapWord* new_end_aligned = (HeapWord*) align_ptr_up(new_end, _page_size); HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
assert((void*)new_end_aligned >= (void*) new_end, "align up, but less"); assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
// Check the other regions (excludes "ind") to ensure that // Check the other regions (excludes "ind") to ensure that
// the new_end_aligned does not intrude onto the committed // the new_end_aligned does not intrude onto the committed
@ -368,8 +368,8 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool relea
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_ptr_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start()); jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last()); jbyte* last = byte_after(mr.last());
while (cur < last) { while (cur < last) {
@ -379,8 +379,8 @@ void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
} }
void CardTableModRefBS::invalidate(MemRegion mr) { void CardTableModRefBS::invalidate(MemRegion mr) {
assert(align_ptr_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert(align_ptr_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
for (int i = 0; i < _cur_covered_regions; i++) { for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]); MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) dirty_MemRegion(mri); if (!mri.is_empty()) dirty_MemRegion(mri);

View file

@ -168,7 +168,7 @@ public:
// in, um, words. // in, um, words.
inline size_t cards_required(size_t covered_words) { inline size_t cards_required(size_t covered_words) {
// Add one for a guard card, used to detect errors. // Add one for a guard card, used to detect errors.
const size_t words = align_size_up(covered_words, card_size_in_words); const size_t words = align_up(covered_words, card_size_in_words);
return words / card_size_in_words + 1; return words / card_size_in_words + 1;
} }
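
cards_required rounds the covered words up to a whole card and then adds the guard card mentioned in the comment. A sketch with an assumed card geometry (512-byte cards over 8-byte HeapWords, so 64 words per card):

#include <cstddef>

constexpr size_t card_size_in_words = 64;  // assumed geometry
constexpr size_t align_up(size_t s, size_t a) { return (s + a - 1) & ~(a - 1); }

constexpr size_t cards_required(size_t covered_words) {
  // Round up to a whole card, then add one guard card.
  return align_up(covered_words, card_size_in_words) / card_size_in_words + 1;
}

static_assert(cards_required(1000) == 17, "16 data cards plus the guard card");
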

View file

@ -374,7 +374,7 @@ size_t CollectedHeap::max_tlab_size() const {
size_t max_int_size = typeArrayOopDesc::header_size(T_INT) + size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
sizeof(jint) * sizeof(jint) *
((juint) max_jint / (size_t) HeapWordSize); ((juint) max_jint / (size_t) HeapWordSize);
return align_size_down(max_int_size, MinObjAlignment); return align_down(max_int_size, MinObjAlignment);
} }
// Helper for ReduceInitialCardMarks. For performance, // Helper for ReduceInitialCardMarks. For performance,

View file

@ -268,12 +268,12 @@ inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
return addr; return addr;
} }
assert(is_ptr_aligned(addr, HeapWordSize), assert(is_aligned(addr, HeapWordSize),
"Address " PTR_FORMAT " is not properly aligned.", p2i(addr)); "Address " PTR_FORMAT " is not properly aligned.", p2i(addr));
assert(is_size_aligned(alignment_in_bytes, HeapWordSize), assert(is_aligned(alignment_in_bytes, HeapWordSize),
"Alignment size %u is incorrect.", alignment_in_bytes); "Alignment size %u is incorrect.", alignment_in_bytes);
HeapWord* new_addr = align_ptr_up(addr, alignment_in_bytes); HeapWord* new_addr = align_up(addr, alignment_in_bytes);
size_t padding = pointer_delta(new_addr, addr); size_t padding = pointer_delta(new_addr, addr);
if (padding == 0) { if (padding == 0) {
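
The hunk above bumps the allocation pointer to the requested boundary and measures the gap that has to be filled. A sketch with made-up addresses (pointer_delta in the VM counts HeapWords; the sketch stays in bytes):

#include <cstdint>

constexpr uintptr_t align_up(uintptr_t p, uintptr_t a) { return (p + a - 1) & ~(a - 1); }

// Request a 64-byte boundary starting from the hypothetical address 0x1008.
static_assert(align_up(0x1008, 0x40) == 0x1040,      "bump to the next 64-byte boundary");
static_assert(align_up(0x1008, 0x40) - 0x1008 == 56, "56 bytes (7 HeapWords) of padding to fill");
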

View file

@ -105,9 +105,9 @@ void CollectorPolicy::initialize_flags() {
} }
// User inputs from -Xmx and -Xms must be aligned // User inputs from -Xmx and -Xms must be aligned
_min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment); _min_heap_byte_size = align_up(_min_heap_byte_size, _heap_alignment);
size_t aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment); size_t aligned_initial_heap_size = align_up(InitialHeapSize, _heap_alignment);
size_t aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment); size_t aligned_max_heap_size = align_up(MaxHeapSize, _heap_alignment);
// Write back to flags if the values changed // Write back to flags if the values changed
if (aligned_initial_heap_size != InitialHeapSize) { if (aligned_initial_heap_size != InitialHeapSize) {
@ -133,7 +133,7 @@ void CollectorPolicy::initialize_flags() {
_initial_heap_byte_size = InitialHeapSize; _initial_heap_byte_size = InitialHeapSize;
_max_heap_byte_size = MaxHeapSize; _max_heap_byte_size = MaxHeapSize;
FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment)); FLAG_SET_ERGO(size_t, MinHeapDeltaBytes, align_up(MinHeapDeltaBytes, _space_alignment));
DEBUG_ONLY(CollectorPolicy::assert_flags();) DEBUG_ONLY(CollectorPolicy::assert_flags();)
} }
@ -198,7 +198,7 @@ GenCollectorPolicy::GenCollectorPolicy() :
{} {}
size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment); return align_down_bounded(base_size / (NewRatio + 1), _gen_alignment);
} }
size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
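
scale_by_NewRatio_aligned uses the _bounded variant so that a tiny base size cannot round down to a zero-sized young generation. The body below is an assumption about that semantics (never below one alignment unit), not the VM source:

#include <cstddef>

constexpr size_t K = 1024;
constexpr size_t align_down(size_t s, size_t a) { return s & ~(a - 1); }

constexpr size_t align_down_bounded(size_t size, size_t alignment) {
  return align_down(size, alignment) > alignment ? align_down(size, alignment) : alignment;
}

static_assert(align_down_bounded(200 * K, 64 * K) == 192 * K, "normally just rounds down");
static_assert(align_down_bounded(32 * K, 64 * K) == 64 * K,   "but never below one alignment unit");
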
@ -221,11 +221,11 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
size_t GenCollectorPolicy::young_gen_size_lower_bound() { size_t GenCollectorPolicy::young_gen_size_lower_bound() {
// The young generation must be aligned and have room for eden + two survivors // The young generation must be aligned and have room for eden + two survivors
return align_size_up(3 * _space_alignment, _gen_alignment); return align_up(3 * _space_alignment, _gen_alignment);
} }
size_t GenCollectorPolicy::old_gen_size_lower_bound() { size_t GenCollectorPolicy::old_gen_size_lower_bound() {
return align_size_up(_space_alignment, _gen_alignment); return align_up(_space_alignment, _gen_alignment);
} }
#ifdef ASSERT #ifdef ASSERT
@ -287,7 +287,7 @@ void GenCollectorPolicy::initialize_flags() {
// Make sure the heap is large enough for two generations // Make sure the heap is large enough for two generations
size_t smallest_new_size = young_gen_size_lower_bound(); size_t smallest_new_size = young_gen_size_lower_bound();
size_t smallest_heap_size = align_size_up(smallest_new_size + old_gen_size_lower_bound(), size_t smallest_heap_size = align_up(smallest_new_size + old_gen_size_lower_bound(),
_heap_alignment); _heap_alignment);
if (MaxHeapSize < smallest_heap_size) { if (MaxHeapSize < smallest_heap_size) {
FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size); FLAG_SET_ERGO(size_t, MaxHeapSize, smallest_heap_size);
@ -311,7 +311,7 @@ void GenCollectorPolicy::initialize_flags() {
// Now take the actual NewSize into account. We will silently increase NewSize // Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value. // if the user specified a smaller or unaligned value.
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize); size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
bounded_new_size = MAX2(smallest_new_size, align_size_down(bounded_new_size, _gen_alignment)); bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, _gen_alignment));
if (bounded_new_size != NewSize) { if (bounded_new_size != NewSize) {
FLAG_SET_ERGO(size_t, NewSize, bounded_new_size); FLAG_SET_ERGO(size_t, NewSize, bounded_new_size);
} }
@ -334,8 +334,8 @@ void GenCollectorPolicy::initialize_flags() {
} }
} else if (MaxNewSize < _initial_young_size) { } else if (MaxNewSize < _initial_young_size) {
FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size); FLAG_SET_ERGO(size_t, MaxNewSize, _initial_young_size);
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) { } else if (!is_aligned(MaxNewSize, _gen_alignment)) {
FLAG_SET_ERGO(size_t, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment)); FLAG_SET_ERGO(size_t, MaxNewSize, align_down(MaxNewSize, _gen_alignment));
} }
_max_young_size = MaxNewSize; _max_young_size = MaxNewSize;
} }
@ -359,8 +359,8 @@ void GenCollectorPolicy::initialize_flags() {
if (OldSize < old_gen_size_lower_bound()) { if (OldSize < old_gen_size_lower_bound()) {
FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound()); FLAG_SET_ERGO(size_t, OldSize, old_gen_size_lower_bound());
} }
if (!is_size_aligned(OldSize, _gen_alignment)) { if (!is_aligned(OldSize, _gen_alignment)) {
FLAG_SET_ERGO(size_t, OldSize, align_size_down(OldSize, _gen_alignment)); FLAG_SET_ERGO(size_t, OldSize, align_down(OldSize, _gen_alignment));
} }
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
@ -370,7 +370,7 @@ void GenCollectorPolicy::initialize_flags() {
assert(NewRatio > 0, "NewRatio should have been set up earlier"); assert(NewRatio > 0, "NewRatio should have been set up earlier");
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment); calculated_heapsize = align_up(calculated_heapsize, _heap_alignment);
FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize); FLAG_SET_ERGO(size_t, MaxHeapSize, calculated_heapsize);
_max_heap_byte_size = MaxHeapSize; _max_heap_byte_size = MaxHeapSize;
FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize); FLAG_SET_ERGO(size_t, InitialHeapSize, calculated_heapsize);
@ -384,7 +384,7 @@ void GenCollectorPolicy::initialize_flags() {
// exceed it. Adjust New/OldSize as necessary. // exceed it. Adjust New/OldSize as necessary.
size_t calculated_size = NewSize + OldSize; size_t calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size; double shrink_factor = (double) MaxHeapSize / calculated_size;
size_t smaller_new_size = align_size_down((size_t)(NewSize * shrink_factor), _gen_alignment); size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), _gen_alignment);
FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); FLAG_SET_ERGO(size_t, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
_initial_young_size = NewSize; _initial_young_size = NewSize;
@ -394,7 +394,7 @@ void GenCollectorPolicy::initialize_flags() {
// is a multiple of _gen_alignment. // is a multiple of _gen_alignment.
FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize); FLAG_SET_ERGO(size_t, OldSize, MaxHeapSize - NewSize);
} else { } else {
FLAG_SET_ERGO(size_t, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment)); FLAG_SET_ERGO(size_t, MaxHeapSize, align_up(NewSize + OldSize, _heap_alignment));
_max_heap_byte_size = MaxHeapSize; _max_heap_byte_size = MaxHeapSize;
} }
} }

View file

@ -41,8 +41,8 @@ private:
public: public:
GenerationSpec(Generation::Name name, size_t init_size, size_t max_size, size_t alignment) : GenerationSpec(Generation::Name name, size_t init_size, size_t max_size, size_t alignment) :
_name(name), _name(name),
_init_size(align_size_up(init_size, alignment)), _init_size(align_up(init_size, alignment)),
_max_size(align_size_up(max_size, alignment)) _max_size(align_up(max_size, alignment))
{ } { }
Generation* init(ReservedSpace rs, CardTableRS* remset); Generation* init(ReservedSpace rs, CardTableRS* remset);

View file

@ -656,7 +656,7 @@ HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
if (pointer_delta(end_value, obj) >= size) { if (pointer_delta(end_value, obj) >= size) {
HeapWord* new_top = obj + size; HeapWord* new_top = obj + size;
set_top(new_top); set_top(new_top);
assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top), assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
"checking alignment"); "checking alignment");
return obj; return obj;
} else { } else {

View file

@ -154,7 +154,7 @@ class Space: public CHeapObj<mtGC> {
// Test whether p is double-aligned // Test whether p is double-aligned
static bool is_aligned(void* p) { static bool is_aligned(void* p) {
return is_ptr_aligned(p, sizeof(double)); return ::is_aligned(p, sizeof(double));
} }
// Size computations. Sizes are in bytes. // Size computations. Sizes are in bytes.

View file

@ -149,7 +149,7 @@ template <class E, MEMFLAGS F>
size_t MmapArrayAllocator<E, F>::size_for(size_t length) { size_t MmapArrayAllocator<E, F>::size_for(size_t length) {
size_t size = length * sizeof(E); size_t size = length * sizeof(E);
int alignment = os::vm_allocation_granularity(); int alignment = os::vm_allocation_granularity();
return align_size_up(size, alignment); return align_up(size, alignment);
} }
template <class E, MEMFLAGS F> template <class E, MEMFLAGS F>

View file

@ -563,7 +563,7 @@ void FileMapInfo::write_bytes(const void* buffer, int nbytes) {
// Align file position to an allocation unit boundary. // Align file position to an allocation unit boundary.
void FileMapInfo::align_file_position() { void FileMapInfo::align_file_position() {
size_t new_file_offset = align_size_up(_file_offset, size_t new_file_offset = align_up(_file_offset,
os::vm_allocation_granularity()); os::vm_allocation_granularity());
if (new_file_offset != _file_offset) { if (new_file_offset != _file_offset) {
_file_offset = new_file_offset; _file_offset = new_file_offset;
@ -613,7 +613,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
return true; return true;
} }
size_t used = si->_used; size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity()); size_t size = align_up(used, os::vm_allocation_granularity());
if (!open_for_read()) { if (!open_for_read()) {
return false; return false;
} }
@ -664,7 +664,7 @@ char* FileMapInfo::map_region(int i) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used; size_t used = si->_used;
size_t alignment = os::vm_allocation_granularity(); size_t alignment = os::vm_allocation_granularity();
size_t size = align_size_up(used, alignment); size_t size = align_up(used, alignment);
char *requested_addr = _header->region_addr(i); char *requested_addr = _header->region_addr(i);
// If a tool agent is in use (debugging enabled), we must map the address space RW // If a tool agent is in use (debugging enabled), we must map the address space RW
@ -831,7 +831,7 @@ void FileMapInfo::unmap_region(int i) {
assert(!MetaspaceShared::is_string_region(i), "sanity"); assert(!MetaspaceShared::is_string_region(i), "sanity");
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i]; struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used; size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity()); size_t size = align_up(used, os::vm_allocation_granularity());
if (used == 0) { if (used == 0) {
return; return;

View file

@ -254,9 +254,9 @@ public:
// The ro+rw+md+mc spaces size // The ro+rw+md+mc spaces size
static size_t core_spaces_size() { static size_t core_spaces_size() {
return align_size_up((SharedReadOnlySize + SharedReadWriteSize + return align_up((SharedReadOnlySize + SharedReadWriteSize +
SharedMiscDataSize + SharedMiscCodeSize), SharedMiscDataSize + SharedMiscCodeSize),
os::vm_allocation_granularity()); os::vm_allocation_granularity());
} }
// The estimated optional space size. // The estimated optional space size.

View file

@ -112,7 +112,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
} }
const size_t granularity = os::vm_allocation_granularity(); const size_t granularity = os::vm_allocation_granularity();
const size_t c_size = align_size_up(committed_size, page_size); const size_t c_size = align_up(committed_size, page_size);
os::trace_page_sizes(_name, committed_size, rs.size(), page_size, os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
rs.base(), rs.size()); rs.base(), rs.size());
@ -125,7 +125,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_number_of_reserved_segments = size_to_segments(_memory.reserved_size()); _number_of_reserved_segments = size_to_segments(_memory.reserved_size());
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity); const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment); const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments); const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
// reserve space for _segmap // reserve space for _segmap

View file

@ -42,7 +42,7 @@ size_t Metachunk::object_alignment() {
} }
size_t Metachunk::overhead() { size_t Metachunk::overhead() {
return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord; return align_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
} }
// Metachunk methods // Metachunk methods

View file

@ -462,16 +462,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
void print_on(outputStream* st) const; void print_on(outputStream* st) const;
}; };
#define assert_is_ptr_aligned(ptr, alignment) \ #define assert_is_aligned(value, alignment) \
assert(is_ptr_aligned(ptr, alignment), \ assert(is_aligned((value), (alignment)), \
PTR_FORMAT " is not aligned to " \ SIZE_FORMAT_HEX " is not aligned to " \
SIZE_FORMAT, p2i(ptr), alignment) SIZE_FORMAT, (size_t)(uintptr_t)value, (alignment))
#define assert_is_size_aligned(size, alignment) \
assert(is_size_aligned(size, alignment), \
SIZE_FORMAT " is not aligned to " \
SIZE_FORMAT, size, alignment)
// Decide if large pages should be committed when the memory is reserved. // Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) { static bool should_commit_large_pages_when_reserving(size_t bytes) {
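
With the infixes gone, the two assertion macros above collapse into one because a single overloaded is_aligned can accept both sizes and pointers. A sketch of that idea; the overload set and macro body are assumptions, not the HotSpot align.hpp sources:

#include <cassert>
#include <cstddef>
#include <cstdint>

inline bool is_aligned(uintptr_t value, size_t alignment) {
  return (value & (alignment - 1)) == 0;
}
inline bool is_aligned(const void* ptr, size_t alignment) {
  return is_aligned(reinterpret_cast<uintptr_t>(ptr), alignment);
}

#define assert_is_aligned(value, alignment) \
  assert(is_aligned((value), (alignment)) && #value " is not aligned")

void example(char* base, size_t bytes) {
  assert_is_aligned(base, 4096);   // pointer argument
  assert_is_aligned(bytes, 4096);  // size argument, same macro
}
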
@ -489,7 +483,7 @@ static bool should_commit_large_pages_when_reserving(size_t bytes) {
// byte_size is the size of the associated virtualspace. // byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) { VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
assert_is_size_aligned(bytes, Metaspace::reserve_alignment()); assert_is_aligned(bytes, Metaspace::reserve_alignment());
#if INCLUDE_CDS #if INCLUDE_CDS
// This allocates memory with mmap. For DumpSharedspaces, try to reserve // This allocates memory with mmap. For DumpSharedspaces, try to reserve
@ -497,7 +491,7 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(
// memory addresses don't conflict. // memory addresses don't conflict.
if (DumpSharedSpaces) { if (DumpSharedSpaces) {
bool large_pages = false; // No large pages when dumping the CDS archive. bool large_pages = false; // No large pages when dumping the CDS archive.
char* shared_base = align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment()); char* shared_base = align_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base); _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base);
if (_rs.is_reserved()) { if (_rs.is_reserved()) {
@ -522,8 +516,8 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(
if (_rs.is_reserved()) { if (_rs.is_reserved()) {
assert(_rs.base() != NULL, "Catch if we get a NULL address"); assert(_rs.base() != NULL, "Catch if we get a NULL address");
assert(_rs.size() != 0, "Catch if we get a 0 size"); assert(_rs.size() != 0, "Catch if we get a 0 size");
assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment()); assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment()); assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
} }
@ -863,7 +857,7 @@ class SpaceManager : public CHeapObj<mtClass> {
size_t byte_size = word_size * BytesPerWord; size_t byte_size = word_size * BytesPerWord;
size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock)); size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment()); raw_bytes_size = align_up(raw_bytes_size, Metachunk::object_alignment());
size_t raw_word_size = raw_bytes_size / BytesPerWord; size_t raw_word_size = raw_bytes_size / BytesPerWord;
assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
@ -1068,8 +1062,8 @@ bool VirtualSpaceNode::initialize() {
// These are necessary restriction to make sure that the virtual space always // These are necessary restriction to make sure that the virtual space always
// grows in steps of Metaspace::commit_alignment(). If both base and size are // grows in steps of Metaspace::commit_alignment(). If both base and size are
// aligned only the middle alignment of the VirtualSpace is used. // aligned only the middle alignment of the VirtualSpace is used.
assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment()); assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment()); assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
// ReservedSpaces marked as special will have the entire memory // ReservedSpaces marked as special will have the entire memory
// pre-committed. Setting a committed size will make sure that // pre-committed. Setting a committed size will make sure that
@ -1323,7 +1317,7 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
// Reserve the space // Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord; size_t vs_byte_size = vs_word_size * BytesPerWord;
assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment()); assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
// Allocate the meta virtual space and initialize it. // Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@ -1378,8 +1372,8 @@ bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
} }
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
assert_is_size_aligned(min_words, Metaspace::commit_alignment_words()); assert_is_aligned(min_words, Metaspace::commit_alignment_words());
assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words()); assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
assert(min_words <= preferred_words, "Invalid arguments"); assert(min_words <= preferred_words, "Invalid arguments");
if (!MetaspaceGC::can_expand(min_words, this->is_class())) { if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
@ -1404,7 +1398,7 @@ bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
// Get another virtual space. // Get another virtual space.
size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words()); grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());
if (create_new_virtual_space(grow_vs_words)) { if (create_new_virtual_space(grow_vs_words)) {
if (current_virtual_space()->is_pre_committed()) { if (current_virtual_space()->is_pre_committed()) {
@ -1435,8 +1429,8 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t sugges
// The expand amount is currently only determined by the requested sizes // The expand amount is currently only determined by the requested sizes
// and not how much committed memory is left in the current virtual space. // and not how much committed memory is left in the current virtual space.
size_t min_word_size = align_size_up(chunk_word_size, Metaspace::commit_alignment_words()); size_t min_word_size = align_up(chunk_word_size, Metaspace::commit_alignment_words());
size_t preferred_word_size = align_size_up(suggested_commit_granularity, Metaspace::commit_alignment_words()); size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
if (min_word_size >= preferred_word_size) { if (min_word_size >= preferred_word_size) {
// Can happen when humongous chunks are allocated. // Can happen when humongous chunks are allocated.
preferred_word_size = min_word_size; preferred_word_size = min_word_size;
@ -1488,7 +1482,7 @@ void VirtualSpaceList::print_on(outputStream* st) const {
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
size_t min_delta = MinMetaspaceExpansion; size_t min_delta = MinMetaspaceExpansion;
size_t max_delta = MaxMetaspaceExpansion; size_t max_delta = MaxMetaspaceExpansion;
size_t delta = align_size_up(bytes, Metaspace::commit_alignment()); size_t delta = align_up(bytes, Metaspace::commit_alignment());
if (delta <= min_delta) { if (delta <= min_delta) {
delta = min_delta; delta = min_delta;
@ -1503,7 +1497,7 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
delta = delta + min_delta; delta = delta + min_delta;
} }
assert_is_size_aligned(delta, Metaspace::commit_alignment()); assert_is_aligned(delta, Metaspace::commit_alignment());
return delta; return delta;
} }
@ -1515,14 +1509,14 @@ size_t MetaspaceGC::capacity_until_GC() {
} }
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) { bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
assert_is_size_aligned(v, Metaspace::commit_alignment()); assert_is_aligned(v, Metaspace::commit_alignment());
size_t capacity_until_GC = (size_t) _capacity_until_GC; size_t capacity_until_GC = (size_t) _capacity_until_GC;
size_t new_value = capacity_until_GC + v; size_t new_value = capacity_until_GC + v;
if (new_value < capacity_until_GC) { if (new_value < capacity_until_GC) {
// The addition wrapped around, set new_value to aligned max value. // The addition wrapped around, set new_value to aligned max value.
new_value = align_size_down(max_uintx, Metaspace::commit_alignment()); new_value = align_down(max_uintx, Metaspace::commit_alignment());
} }
intptr_t expected = (intptr_t) capacity_until_GC; intptr_t expected = (intptr_t) capacity_until_GC;
@ -1542,7 +1536,7 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size
} }
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
assert_is_size_aligned(v, Metaspace::commit_alignment()); assert_is_aligned(v, Metaspace::commit_alignment());
return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC); return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
} }
@ -1628,7 +1622,7 @@ void MetaspaceGC::compute_new_size() {
// If we have less capacity below the metaspace HWM, then // If we have less capacity below the metaspace HWM, then
// increment the HWM. // increment the HWM.
size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment()); expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
// Don't expand unless it's significant // Don't expand unless it's significant
if (expand_bytes >= MinMetaspaceExpansion) { if (expand_bytes >= MinMetaspaceExpansion) {
size_t new_capacity_until_GC = 0; size_t new_capacity_until_GC = 0;
@ -1681,7 +1675,7 @@ void MetaspaceGC::compute_new_size() {
// size without shrinking, it goes back to 0%. // size without shrinking, it goes back to 0%.
shrink_bytes = shrink_bytes / 100 * current_shrink_factor; shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment()); shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
assert(shrink_bytes <= max_shrink_bytes, assert(shrink_bytes <= max_shrink_bytes,
"invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
@ -2240,7 +2234,7 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
// humongous allocations sizes to be aligned up to // humongous allocations sizes to be aligned up to
// the smallest chunk size. // the smallest chunk size.
size_t if_humongous_sized_chunk = size_t if_humongous_sized_chunk =
align_size_up(word_size + Metachunk::overhead(), align_up(word_size + Metachunk::overhead(),
smallest_chunk_size()); smallest_chunk_size());
chunk_word_size = chunk_word_size =
MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
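
For humongous allocations the requested words plus the chunk header are rounded up to a whole smallest chunk, and the larger of that and the normal chunk size wins. A sketch with placeholder constants (4-word overhead, 128-word smallest chunk), not the real Metaspace values:

#include <cstddef>

constexpr size_t metachunk_overhead   = 4;    // words, placeholder value
constexpr size_t smallest_chunk_words = 128;  // words, placeholder value
constexpr size_t align_up(size_t s, size_t a) { return (s + a - 1) & ~(a - 1); }

constexpr size_t humongous_chunk_words(size_t word_size, size_t normal_chunk_words) {
  return align_up(word_size + metachunk_overhead, smallest_chunk_words) > normal_chunk_words
       ? align_up(word_size + metachunk_overhead, smallest_chunk_words)
       : normal_chunk_words;
}

static_assert(humongous_chunk_words(5000, 4096) == 5120,
              "5004 words rounded up to a whole number of smallest chunks");
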
@ -3099,9 +3093,9 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
"Metaspace size is too big"); "Metaspace size is too big");
assert_is_ptr_aligned(requested_addr, _reserve_alignment); assert_is_aligned(requested_addr, _reserve_alignment);
assert_is_ptr_aligned(cds_base, _reserve_alignment); assert_is_aligned(cds_base, _reserve_alignment);
assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
// Don't use large pages for the class space. // Don't use large pages for the class space.
bool large_pages = false; bool large_pages = false;
@ -3130,7 +3124,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// Aix: Search for a place where we can find memory. If we need to load // Aix: Search for a place where we can find memory. If we need to load
// the base, 4G alignment is helpful, too. // the base, 4G alignment is helpful, too.
size_t increment = AARCH64_ONLY(4*)G; size_t increment = AARCH64_ONLY(4*)G;
for (char *a = align_ptr_up(requested_addr, increment); for (char *a = align_up(requested_addr, increment);
a < (char*)(1024*G); a < (char*)(1024*G);
a += increment) { a += increment) {
if (a == (char *)(32*G)) { if (a == (char *)(32*G)) {
@ -3165,7 +3159,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
if (!metaspace_rs.is_reserved()) { if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS #if INCLUDE_CDS
if (UseSharedSpaces) { if (UseSharedSpaces) {
size_t increment = align_size_up(1*G, _reserve_alignment); size_t increment = align_up(1*G, _reserve_alignment);
// Keep trying to allocate the metaspace, increasing the requested_addr // Keep trying to allocate the metaspace, increasing the requested_addr
// by 1GB each time, until we reach an address that will no longer allow // by 1GB each time, until we reach an address that will no longer allow
@ -3269,20 +3263,20 @@ void Metaspace::ergo_initialize() {
// Ideally, we would be able to set the default value of MaxMetaspaceSize in // Ideally, we would be able to set the default value of MaxMetaspaceSize in
// globals.hpp to the aligned value, but this is not possible, since the // globals.hpp to the aligned value, but this is not possible, since the
// alignment depends on other flags being parsed. // alignment depends on other flags being parsed.
MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
if (MetaspaceSize > MaxMetaspaceSize) { if (MetaspaceSize > MaxMetaspaceSize) {
MetaspaceSize = MaxMetaspaceSize; MetaspaceSize = MaxMetaspaceSize;
} }
MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
set_compressed_class_space_size(CompressedClassSpaceSize); set_compressed_class_space_size(CompressedClassSpaceSize);
} }
@ -3299,16 +3293,16 @@ void Metaspace::global_initialize() {
#if INCLUDE_CDS #if INCLUDE_CDS
MetaspaceShared::estimate_regions_size(); MetaspaceShared::estimate_regions_size();
SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); SharedReadOnlySize = align_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); SharedReadWriteSize = align_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); SharedMiscDataSize = align_up(SharedMiscDataSize, max_alignment);
SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); SharedMiscCodeSize = align_up(SharedMiscCodeSize, max_alignment);
// Initialize with the sum of the shared space sizes. The read-only // Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the // and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks. // remainder is the misc code and data chunks.
cds_total = FileMapInfo::shared_spaces_size(); cds_total = FileMapInfo::shared_spaces_size();
cds_total = align_size_up(cds_total, _reserve_alignment); cds_total = align_up(cds_total, _reserve_alignment);
_space_list = new VirtualSpaceList(cds_total/wordSize); _space_list = new VirtualSpaceList(cds_total/wordSize);
_chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
@ -3355,7 +3349,7 @@ void Metaspace::global_initialize() {
#ifdef _LP64 #ifdef _LP64
if (using_class_space()) { if (using_class_space()) {
char* cds_end = (char*)(cds_address + cds_total); char* cds_end = (char*)(cds_address + cds_total);
cds_end = align_ptr_up(cds_end, _reserve_alignment); cds_end = align_up(cds_end, _reserve_alignment);
// If UseCompressedClassPointers is set then allocate the metaspace area // If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists). // above the heap and above the CDS area (if it exists).
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
@ -3373,7 +3367,7 @@ void Metaspace::global_initialize() {
#ifdef _LP64 #ifdef _LP64
if (!UseSharedSpaces && using_class_space()) { if (!UseSharedSpaces && using_class_space()) {
char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(base, 0); allocate_metaspace_compressed_klass_ptrs(base, 0);
} }
#endif // _LP64 #endif // _LP64
@ -3390,7 +3384,7 @@ void Metaspace::global_initialize() {
// Arbitrarily set the initial virtual space to a multiple // Arbitrarily set the initial virtual space to a multiple
// of the boot class loader size. // of the boot class loader size.
size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); word_size = align_up(word_size, Metaspace::reserve_alignment_words());
// Initialize the list of virtual spaces. // Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size); _space_list = new VirtualSpaceList(word_size);
@ -4147,7 +4141,7 @@ class ChunkManagerReturnTestImpl {
return sizes[rand]; return sizes[rand];
} else { } else {
// Note: this affects the max. size of space (see _vsn initialization in ctor). // Note: this affects the max. size of space (see _vsn initialization in ctor).
return align_size_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk); return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
} }
} }
@ -4294,7 +4288,7 @@ class ChunkManagerReturnTestImpl {
public: public:
ChunkManagerReturnTestImpl() ChunkManagerReturnTestImpl()
: _vsn(align_size_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment())) : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
, _cm(SpecializedChunk, SmallChunk, MediumChunk) , _cm(SpecializedChunk, SmallChunk, MediumChunk)
, _chunks_in_chunkmanager(0) , _chunks_in_chunkmanager(0)
, _words_in_chunkmanager(0) , _words_in_chunkmanager(0)

View file

@ -83,8 +83,8 @@ void SharedMiscRegion::initialize(ReservedSpace rs, size_t committed_byte_size,
char* SharedMiscRegion::alloc(size_t num_bytes) { char* SharedMiscRegion::alloc(size_t num_bytes) {
assert(DumpSharedSpaces, "dump time only"); assert(DumpSharedSpaces, "dump time only");
size_t alignment = sizeof(char*); size_t alignment = sizeof(char*);
num_bytes = align_size_up(num_bytes, alignment); num_bytes = align_up(num_bytes, alignment);
_alloc_top = align_ptr_up(_alloc_top, alignment); _alloc_top = align_up(_alloc_top, alignment);
if (_alloc_top + num_bytes > _vs.high()) { if (_alloc_top + num_bytes > _vs.high()) {
report_out_of_shared_space(_space_type); report_out_of_shared_space(_space_type);
} }

View file

@ -33,7 +33,7 @@
// when the start address is not a multiple of alignment; the second maintains // when the start address is not a multiple of alignment; the second maintains
// alignment of starting addresses that happen to be a multiple. // alignment of starting addresses that happen to be a multiple.
#define PADDING_SIZE(type, alignment) \ #define PADDING_SIZE(type, alignment) \
((alignment) + align_size_up_(sizeof(type), alignment)) ((alignment) + align_up_(sizeof(type), (alignment)))
// Templates to create a subclass padded to avoid cache line sharing. These are // Templates to create a subclass padded to avoid cache line sharing. These are
// effective only when applied to derived-most (leaf) classes. // effective only when applied to derived-most (leaf) classes.
@ -68,7 +68,7 @@ class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
// No padding. // No padding.
}; };
#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type)) #define PADDED_END_SIZE(type, alignment) (align_up_(sizeof(type), (alignment)) - sizeof(type))
// More memory conservative implementation of Padded. The subclass adds the // More memory conservative implementation of Padded. The subclass adds the
// minimal amount of padding needed to make the size of the objects be aligned. // minimal amount of padding needed to make the size of the objects be aligned.

View file

@ -35,13 +35,13 @@
template <class T, MEMFLAGS flags, size_t alignment> template <class T, MEMFLAGS flags, size_t alignment>
PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) { PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
// Check that the PaddedEnd class works as intended. // Check that the PaddedEnd class works as intended.
STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment)); STATIC_ASSERT(is_aligned_(sizeof(PaddedEnd<T>), alignment));
// Allocate a chunk of memory large enough to allow for some alignment. // Allocate a chunk of memory large enough to allow for some alignment.
void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags); void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
// Make the initial alignment. // Make the initial alignment.
PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_ptr_up(chunk, alignment); PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_up(chunk, alignment);
// Call the default constructor for each element. // Call the default constructor for each element.
for (uint i = 0; i < length; i++) { for (uint i = 0; i < length; i++) {
@ -54,9 +54,9 @@ PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
template <class T, MEMFLAGS flags, size_t alignment> template <class T, MEMFLAGS flags, size_t alignment>
T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) { T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
// Calculate and align the size of the first dimension's table. // Calculate and align the size of the first dimension's table.
size_t table_size = align_size_up_(rows * sizeof(T*), alignment); size_t table_size = align_up_(rows * sizeof(T*), alignment);
// The size of the separate rows. // The size of the separate rows.
size_t row_size = align_size_up_(columns * sizeof(T), alignment); size_t row_size = align_up_(columns * sizeof(T), alignment);
// Total size consists of the indirection table plus the rows. // Total size consists of the indirection table plus the rows.
size_t total_size = table_size + rows * row_size + alignment; size_t total_size = table_size + rows * row_size + alignment;
@ -65,7 +65,7 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
// Clear the allocated memory. // Clear the allocated memory.
memset(chunk, 0, total_size); memset(chunk, 0, total_size);
// Align the chunk of memory. // Align the chunk of memory.
T** result = (T**)align_ptr_up(chunk, alignment); T** result = (T**)align_up(chunk, alignment);
void* data_start = (void*)((uintptr_t)result + table_size); void* data_start = (void*)((uintptr_t)result + table_size);
// Fill in the row table. // Fill in the row table.
@ -87,7 +87,7 @@ T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
memset(chunk, 0, length * sizeof(T) + alignment); memset(chunk, 0, length * sizeof(T) + alignment);
return (T*)align_ptr_up(chunk, alignment); return (T*)align_up(chunk, alignment);
} }
#endif // SHARE_VM_MEMORY_PADDED_INLINE_HPP #endif // SHARE_VM_MEMORY_PADDED_INLINE_HPP

View file

@ -552,7 +552,7 @@ void Universe::reinitialize_itables(TRAPS) {
bool Universe::on_page_boundary(void* addr) { bool Universe::on_page_boundary(void* addr) {
return is_ptr_aligned(addr, os::vm_page_size()); return is_aligned(addr, os::vm_page_size());
} }
@ -818,11 +818,11 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
"actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT, "actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
alignment, Arguments::conservative_max_heap_alignment()); alignment, Arguments::conservative_max_heap_alignment());
size_t total_reserved = align_size_up(heap_size, alignment); size_t total_reserved = align_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops"); "heap size is too big for compressed oops");
bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size()); bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
assert(!UseLargePages assert(!UseLargePages
|| UseParallelGC || UseParallelGC
|| use_large_pages, "Wrong alignment to use large pages"); || use_large_pages, "Wrong alignment to use large pages");

View file

@ -47,7 +47,7 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
// ReservedSpace initialization requires size to be aligned to the given // ReservedSpace initialization requires size to be aligned to the given
// alignment. Align the size up. // alignment. Align the size up.
size = align_size_up(size, alignment); size = align_up(size, alignment);
} else { } else {
// Don't force the alignment to be large page aligned, // Don't force the alignment to be large page aligned,
// since that will waste memory. // since that will waste memory.
@ -172,7 +172,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
// Base not aligned, retry // Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed"); if (!os::release_memory(base, size)) fatal("os::release_memory failed");
// Make sure that size is aligned // Make sure that size is aligned
size = align_size_up(size, alignment); size = align_up(size, alignment);
base = os::reserve_memory_aligned(size, alignment); base = os::reserve_memory_aligned(size, alignment);
if (requested_address != 0 && if (requested_address != 0 &&
@ -227,22 +227,22 @@ ReservedSpace::last_part(size_t partition_size, size_t alignment) {
size_t ReservedSpace::page_align_size_up(size_t size) { size_t ReservedSpace::page_align_size_up(size_t size) {
return align_size_up(size, os::vm_page_size()); return align_up(size, os::vm_page_size());
} }
size_t ReservedSpace::page_align_size_down(size_t size) { size_t ReservedSpace::page_align_size_down(size_t size) {
return align_size_down(size, os::vm_page_size()); return align_down(size, os::vm_page_size());
} }
size_t ReservedSpace::allocation_align_size_up(size_t size) { size_t ReservedSpace::allocation_align_size_up(size_t size) {
return align_size_up(size, os::vm_allocation_granularity()); return align_up(size, os::vm_allocation_granularity());
} }
size_t ReservedSpace::allocation_align_size_down(size_t size) { size_t ReservedSpace::allocation_align_size_down(size_t size) {
return align_size_down(size, os::vm_allocation_granularity()); return align_down(size, os::vm_allocation_granularity());
} }
@ -383,7 +383,7 @@ void ReservedHeapSpace::try_reserve_range(char *highest_start,
const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible); const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
const size_t stepsize = (attach_range == 0) ? // Only one try. const size_t stepsize = (attach_range == 0) ? // Only one try.
(size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment); (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
// Try attach points from top to bottom. // Try attach points from top to bottom.
char* attach_point = highest_start; char* attach_point = highest_start;
@ -463,7 +463,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
NOT_AIX(os::vm_allocation_granularity()); NOT_AIX(os::vm_allocation_granularity());
const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment); const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment); char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ? size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
noaccess_prefix_size(alignment) : 0; noaccess_prefix_size(alignment) : 0;
@ -492,8 +492,8 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) { if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
// Calc address range within we try to attach (range of possible start addresses). // Calc address range within we try to attach (range of possible start addresses).
char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment); char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment); char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment, try_reserve_range(highest_start, lowest_start, attach_point_alignment,
aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large); aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
} }
@ -502,7 +502,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// But leave room for the compressed class pointers, which is allocated above // But leave room for the compressed class pointers, which is allocated above
// the heap. // the heap.
char *zerobased_max = (char *)OopEncodingHeapMax; char *zerobased_max = (char *)OopEncodingHeapMax;
const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment); const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
// For small heaps, save some space for compressed class pointer // For small heaps, save some space for compressed class pointer
// space so it can be decoded with no base. // space so it can be decoded with no base.
if (UseCompressedClassPointers && !UseSharedSpaces && if (UseCompressedClassPointers && !UseSharedSpaces &&
@ -517,7 +517,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
(_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address. (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
// Calc address range within we try to attach (range of possible start addresses). // Calc address range within we try to attach (range of possible start addresses).
char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment); char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
// Need to be careful about size being guaranteed to be less // Need to be careful about size being guaranteed to be less
// than UnscaledOopHeapMax due to type constraints. // than UnscaledOopHeapMax due to type constraints.
char *lowest_start = aligned_heap_base_min_address; char *lowest_start = aligned_heap_base_min_address;
@ -525,7 +525,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
lowest_start = MAX2(lowest_start, (char*)unscaled_end); lowest_start = MAX2(lowest_start, (char*)unscaled_end);
} }
lowest_start = align_ptr_up(lowest_start, attach_point_alignment); lowest_start = align_up(lowest_start, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment, try_reserve_range(highest_start, lowest_start, attach_point_alignment,
aligned_heap_base_min_address, zerobased_max, size, alignment, large); aligned_heap_base_min_address, zerobased_max, size, alignment, large);
} }
@ -562,7 +562,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large)
} }
// Heap size should be aligned to alignment, too. // Heap size should be aligned to alignment, too.
guarantee(is_size_aligned(size, alignment), "set by caller"); guarantee(is_aligned(size, alignment), "set by caller");
if (UseCompressedOops) { if (UseCompressedOops) {
initialize_compressed_heap(size, alignment, large); initialize_compressed_heap(size, alignment, large);
@ -751,8 +751,8 @@ bool VirtualSpace::contains(const void* p) const {
} }
static void pretouch_expanded_memory(void* start, void* end) { static void pretouch_expanded_memory(void* start, void* end) {
assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment"); assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
assert(is_ptr_aligned(end, os::vm_page_size()), "Unexpected alignment"); assert(is_aligned(end, os::vm_page_size()), "Unexpected alignment");
os::pretouch_memory(start, end); os::pretouch_memory(start, end);
} }
@ -1037,7 +1037,7 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space1(size_t size, size_t alignment) { static void test_reserved_space1(size_t size, size_t alignment) {
test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size); test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
assert(is_size_aligned(size, alignment), "Incorrect input parameters"); assert(is_aligned(size, alignment), "Incorrect input parameters");
ReservedSpace rs(size, // size ReservedSpace rs(size, // size
alignment, // alignment alignment, // alignment
@ -1049,8 +1049,8 @@ class TestReservedSpace : AllStatic {
assert(rs.base() != NULL, "Must be"); assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be"); assert(rs.size() == size, "Must be");
assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses"); assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses"); assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
if (rs.special()) { if (rs.special()) {
small_page_write(rs.base(), size); small_page_write(rs.base(), size);
@ -1062,7 +1062,7 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space2(size_t size) { static void test_reserved_space2(size_t size) {
test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size); test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
ReservedSpace rs(size); ReservedSpace rs(size);
@ -1088,8 +1088,8 @@ class TestReservedSpace : AllStatic {
return; return;
} }
assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment"); assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
bool large = maybe_large && UseLargePages && size >= os::large_page_size(); bool large = maybe_large && UseLargePages && size >= os::large_page_size();
@ -1244,7 +1244,7 @@ class TestVirtualSpace : AllStatic {
static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size, static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
TestLargePages mode = Default) { TestLargePages mode = Default) {
size_t granularity = os::vm_allocation_granularity(); size_t granularity = os::vm_allocation_granularity();
size_t reserve_size_aligned = align_size_up(reserve_size, granularity); size_t reserve_size_aligned = align_up(reserve_size, granularity);
ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode); ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
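
The helpers above are thin wrappers that round a size up or down to the page size or allocation granularity; the commit only renames the underlying align functions, it does not change their arithmetic. A rough standalone sketch of that arithmetic, assuming power-of-two alignments as at these call sites (the sketch_ names are illustrative stand-ins, not the HotSpot declarations, which after this change accept pointers as well as sizes):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins for the renamed helpers. Callers must keep
    // value + alignment from overflowing (see the MaxSizeForAlignment hunk below).
    static inline size_t sketch_align_down(size_t value, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment assumed");
      return value & ~(alignment - 1);                             // clear the low bits
    }

    static inline size_t sketch_align_up(size_t value, size_t alignment) {
      return sketch_align_down(value + alignment - 1, alignment);  // round up, then truncate
    }

    static inline bool sketch_is_aligned(size_t value, size_t alignment) {
      return (value & (alignment - 1)) == 0;
    }

    int main() {
      const size_t page = 4096;                          // e.g. a 4 KB os::vm_page_size()
      printf("%zu\n", sketch_align_up(5000, page));      // 8192
      printf("%zu\n", sketch_align_down(5000, page));    // 4096
      printf("%d\n",  sketch_is_aligned(8192, page));    // 1
      return 0;
    }
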

View file

@ -64,14 +64,14 @@ protected:
// Can't distinguish between array of length 0 and length 1, // Can't distinguish between array of length 0 and length 1,
// will always return 0 in those cases. // will always return 0 in those cases.
static int bytes_to_length(size_t bytes) { static int bytes_to_length(size_t bytes) {
assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now"); assert(is_aligned(bytes, BytesPerWord), "Must be, for now");
if (sizeof(Array<T>) >= bytes) { if (sizeof(Array<T>) >= bytes) {
return 0; return 0;
} }
size_t left = bytes - sizeof(Array<T>); size_t left = bytes - sizeof(Array<T>);
assert(is_size_aligned(left, sizeof(T)), "Must be"); assert(is_aligned(left, sizeof(T)), "Must be");
size_t elements = left / sizeof(T); size_t elements = left / sizeof(T);
assert(elements <= (size_t)INT_MAX, "number of elements " SIZE_FORMAT "doesn't fit into an int.", elements); assert(elements <= (size_t)INT_MAX, "number of elements " SIZE_FORMAT "doesn't fit into an int.", elements);
@ -122,7 +122,7 @@ protected:
void release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); } void release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
static int size(int length) { static int size(int length) {
size_t bytes = align_size_up(byte_sizeof(length), BytesPerWord); size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
size_t words = bytes / BytesPerWord; size_t words = bytes / BytesPerWord;
assert(words <= INT_MAX, "Overflow: " SIZE_FORMAT, words); assert(words <= INT_MAX, "Overflow: " SIZE_FORMAT, words);
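
Array<T>::size() and bytes_to_length() above are near-inverses around a fixed header: one rounds header plus payload up to whole words, the other strips the header and divides by the element size. A rough standalone sketch of that round trip; the 16-byte header and 8-byte word are assumed example values, not the real Array<T> layout:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    const size_t kBytesPerWord = 8;    // assumed 64-bit word
    const size_t kHeaderBytes  = 16;   // assumed fixed header in front of the elements

    // Words needed for 'length' elements of 'elt_size' bytes, header included,
    // rounded up to a whole word -- the align_up(byte_sizeof(length), BytesPerWord) step.
    static size_t size_in_words(size_t length, size_t elt_size) {
      size_t bytes   = kHeaderBytes + length * elt_size;
      size_t aligned = (bytes + kBytesPerWord - 1) & ~(kBytesPerWord - 1);
      return aligned / kBytesPerWord;
    }

    // Opposite direction: how many whole elements fit in an allocation of 'bytes' bytes.
    static size_t bytes_to_length(size_t bytes, size_t elt_size) {
      assert(bytes % kBytesPerWord == 0 && "caller passes a word-aligned byte count");
      if (bytes <= kHeaderBytes) return 0;
      return (bytes - kHeaderBytes) / elt_size;
    }

    int main() {
      printf("%zu\n", size_in_words(3, 4));     // 16 + 12 = 28 bytes -> 4 words
      printf("%zu\n", bytes_to_length(32, 4));  // (32 - 16) / 4 = 4 elements
      return 0;
    }
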

View file

@ -50,7 +50,7 @@ class arrayOopDesc : public oopDesc {
// Returns the aligned header_size_in_bytes. This is not equivalent to // Returns the aligned header_size_in_bytes. This is not equivalent to
// sizeof(arrayOopDesc) which should not appear in the code. // sizeof(arrayOopDesc) which should not appear in the code.
static int header_size_in_bytes() { static int header_size_in_bytes() {
size_t hs = align_size_up(length_offset_in_bytes() + sizeof(int), size_t hs = align_up(length_offset_in_bytes() + sizeof(int),
HeapWordSize); HeapWordSize);
#ifdef ASSERT #ifdef ASSERT
// make sure it isn't called before UseCompressedOops is initialized. // make sure it isn't called before UseCompressedOops is initialized.
@ -112,7 +112,7 @@ class arrayOopDesc : public oopDesc {
assert(type2aelembytes(type) != 0, "wrong type"); assert(type2aelembytes(type) != 0, "wrong type");
const size_t max_element_words_per_size_t = const size_t max_element_words_per_size_t =
align_size_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment); align_down((SIZE_MAX/HeapWordSize - header_size(type)), MinObjAlignment);
const size_t max_elements_per_size_t = const size_t max_elements_per_size_t =
HeapWordSize * max_element_words_per_size_t / type2aelembytes(type); HeapWordSize * max_element_words_per_size_t / type2aelembytes(type);
if ((size_t)max_jint < max_elements_per_size_t) { if ((size_t)max_jint < max_elements_per_size_t) {
@ -120,7 +120,7 @@ class arrayOopDesc : public oopDesc {
// (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for // (CollectedHeap, Klass::oop_oop_iterate(), and more) uses an int for
// passing around the size (in words) of an object. So, we need to avoid // passing around the size (in words) of an object. So, we need to avoid
// overflowing an int when we add the header. See CRs 4718400 and 7110613. // overflowing an int when we add the header. See CRs 4718400 and 7110613.
return align_size_down(max_jint - header_size(type), MinObjAlignment); return align_down(max_jint - header_size(type), MinObjAlignment);
} }
return (int32_t)max_elements_per_size_t; return (int32_t)max_elements_per_size_t;
} }
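
max_array_length() above caps the element count so that header plus payload, counted in words, still fits in the int HotSpot uses for object sizes; the cap is max_jint minus the header, aligned down to the object alignment. A compressed sketch of that cap, with made-up values standing in for header_size(type) and MinObjAlignment:

    #include <climits>
    #include <cstdio>

    const int kHeaderWords     = 2;   // assumed stand-in for header_size(type)
    const int kMinObjAlignment = 2;   // assumed stand-in for MinObjAlignment

    static int align_down_int(int value, int alignment) {
      return value - (value % alignment);   // round a non-negative value down
    }

    int main() {
      // Largest element-word count such that header + elements still fits in an int
      // and stays a multiple of the alignment -- the align_down(max_jint - header, align) idea.
      int max_element_words = align_down_int(INT_MAX - kHeaderWords, kMinObjAlignment);
      printf("%d\n", max_element_words);    // 2147483644 with the values above
      return 0;
    }
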

View file

@ -128,7 +128,7 @@ int ConstMethod::size(int code_size,
} }
// Align sizes up to a word. // Align sizes up to a word.
extra_bytes = align_size_up(extra_bytes, BytesPerWord); extra_bytes = align_up(extra_bytes, BytesPerWord);
// One pointer per annotation array // One pointer per annotation array
if (sizes->method_annotations_length() > 0) { if (sizes->method_annotations_length() > 0) {
@ -144,7 +144,7 @@ int ConstMethod::size(int code_size,
extra_bytes += sizeof(AnnotationArray*); extra_bytes += sizeof(AnnotationArray*);
} }
int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord; int extra_words = align_up(extra_bytes, BytesPerWord) / BytesPerWord;
assert(extra_words == extra_bytes/BytesPerWord, "should already be aligned"); assert(extra_words == extra_bytes/BytesPerWord, "should already be aligned");
return align_metadata_size(header_size() + extra_words); return align_metadata_size(header_size() + extra_words);
} }

View file

@ -360,7 +360,7 @@ public:
// Sizing // Sizing
static int header_size() { static int header_size() {
return align_size_up((int)sizeof(ConstMethod), wordSize) / wordSize; return align_up((int)sizeof(ConstMethod), wordSize) / wordSize;
} }
// Size needed // Size needed

View file

@ -756,7 +756,7 @@ class ConstantPool : public Metadata {
// Sizing (in words) // Sizing (in words)
static int header_size() { static int header_size() {
return align_size_up((int)sizeof(ConstantPool), wordSize) / wordSize; return align_up((int)sizeof(ConstantPool), wordSize) / wordSize;
} }
static int size(int length) { return align_metadata_size(header_size() + length); } static int size(int length) { return align_metadata_size(header_size() + length); }
int size() const { return size(length()); } int size() const { return size(length()); }

View file

@ -362,7 +362,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// Code generation support // Code generation support
static WordSize size() { static WordSize size() {
return in_WordSize(align_size_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize); return in_WordSize(align_up((int)sizeof(ConstantPoolCacheEntry), wordSize) / wordSize);
} }
static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); } static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); } static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }

View file

@ -100,7 +100,7 @@ class OopMapBlock VALUE_OBJ_CLASS_SPEC {
// sizeof(OopMapBlock) in words. // sizeof(OopMapBlock) in words.
static const int size_in_words() { static const int size_in_words() {
return align_size_up((int)sizeof(OopMapBlock), wordSize) >> return align_up((int)sizeof(OopMapBlock), wordSize) >>
LogBytesPerWord; LogBytesPerWord;
} }

View file

@ -294,7 +294,7 @@ address Method::bcp_from(address bcp) const {
int Method::size(bool is_native) { int Method::size(bool is_native) {
// If native, then include pointers for native_function and signature_handler // If native, then include pointers for native_function and signature_handler
int extra_bytes = (is_native) ? 2*sizeof(address*) : 0; int extra_bytes = (is_native) ? 2*sizeof(address*) : 0;
int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord; int extra_words = align_up(extra_bytes, BytesPerWord) / BytesPerWord;
return align_metadata_size(header_size() + extra_words); return align_metadata_size(header_size() + extra_words);
} }

View file

@ -667,7 +667,7 @@ class Method : public Metadata {
// sizing // sizing
static int header_size() { static int header_size() {
return align_size_up((int)sizeof(Method), wordSize) / wordSize; return align_up((int)sizeof(Method), wordSize) / wordSize;
} }
static int size(bool is_native); static int size(bool is_native);
int size() const { return method_size(); } int size() const { return method_size(); }

View file

@ -118,7 +118,7 @@ class MethodCounters : public Metadata {
AOT_ONLY(Method* method() const { return _method; }) AOT_ONLY(Method* method() const { return _method; })
static int size() { static int size() {
return align_size_up((int)sizeof(MethodCounters), wordSize) / wordSize; return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
} }
void clear_counters(); void clear_counters();

View file

@ -937,7 +937,7 @@ int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
// profiling information about a given method. Size is in words // profiling information about a given method. Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) { int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
int byte_size = compute_allocation_size_in_bytes(method); int byte_size = compute_allocation_size_in_bytes(method);
int word_size = align_size_up(byte_size, BytesPerWord) / BytesPerWord; int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
return align_metadata_size(word_size); return align_metadata_size(word_size);
} }

View file

@ -2338,7 +2338,7 @@ public:
// My size // My size
int size_in_bytes() const { return _size; } int size_in_bytes() const { return _size; }
int size() const { return align_metadata_size(align_size_up(_size, BytesPerWord)/BytesPerWord); } int size() const { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
#if INCLUDE_SERVICES #if INCLUDE_SERVICES
void collect_statistics(KlassSizeStats *sz) const; void collect_statistics(KlassSizeStats *sz) const;
#endif #endif

View file

@ -62,7 +62,7 @@ private:
if (HeapWordsPerOop > 0) { if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop; old_res = length * HeapWordsPerOop;
} else { } else {
old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord; old_res = align_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
} }
assert(res == old_res, "Inconsistency between old and new."); assert(res == old_res, "Inconsistency between old and new.");
#endif // ASSERT #endif // ASSERT

View file

@ -3889,7 +3889,7 @@ void Compile::ConstantTable::calculate_offsets_and_size() {
// Align offset for type. // Align offset for type.
int typesize = type_to_size_in_bytes(con->type()); int typesize = type_to_size_in_bytes(con->type());
offset = align_size_up(offset, typesize); offset = align_up(offset, typesize);
con->set_offset(offset); // set constant's offset con->set_offset(offset); // set constant's offset
if (con->type() == T_VOID) { if (con->type() == T_VOID) {
@ -3903,7 +3903,7 @@ void Compile::ConstantTable::calculate_offsets_and_size() {
// Align size up to the next section start (which is insts; see // Align size up to the next section start (which is insts; see
// CodeBuffer::align_at_start). // CodeBuffer::align_at_start).
assert(_size == -1, "already set?"); assert(_size == -1, "already set?");
_size = align_size_up(offset, (int)CodeEntryAlignment); _size = align_up(offset, (int)CodeEntryAlignment);
} }
void Compile::ConstantTable::emit(CodeBuffer& cb) { void Compile::ConstantTable::emit(CodeBuffer& cb) {

View file

@ -3560,7 +3560,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size,
intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize);
intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit); intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit);
size_limit = MIN2(size_limit, ti_limit); size_limit = MIN2(size_limit, ti_limit);
size_limit = align_size_up(size_limit, BytesPerLong); size_limit = align_up(size_limit, BytesPerLong);
int num_tiles = size_limit / BytesPerLong; int num_tiles = size_limit / BytesPerLong;
// allocate space for the tile map: // allocate space for the tile map:
@ -3791,7 +3791,7 @@ intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
// update the map: // update the map:
intptr_t this_int_off = align_size_down(st_off, BytesPerInt); intptr_t this_int_off = align_down(st_off, BytesPerInt);
if (this_int_off != int_map_off) { if (this_int_off != int_map_off) {
// reset the map: // reset the map:
int_map = 0; int_map = 0;
@ -3805,7 +3805,7 @@ intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) {
} }
// Did this store hit or cross the word boundary? // Did this store hit or cross the word boundary?
intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt); intptr_t next_int_off = align_down(st_off + st_size, BytesPerInt);
if (next_int_off == this_int_off + BytesPerInt) { if (next_int_off == this_int_off + BytesPerInt) {
// We passed the current int, without fully initializing it. // We passed the current int, without fully initializing it.
int_map_off = next_int_off; int_map_off = next_int_off;
@ -3895,7 +3895,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
// zsize 0 0 0 0 4 0 4 // zsize 0 0 0 0 4 0 4
if (next_full_store < 0) { if (next_full_store < 0) {
// Conservative tack: Zero to end of current word. // Conservative tack: Zero to end of current word.
zeroes_needed = align_size_up(zeroes_needed, BytesPerInt); zeroes_needed = align_up(zeroes_needed, BytesPerInt);
} else { } else {
// Zero to beginning of next fully initialized word. // Zero to beginning of next fully initialized word.
// Or, don't zero at all, if we are already in that word. // Or, don't zero at all, if we are already in that word.
@ -3908,7 +3908,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
if (zeroes_needed > zeroes_done) { if (zeroes_needed > zeroes_done) {
intptr_t zsize = zeroes_needed - zeroes_done; intptr_t zsize = zeroes_needed - zeroes_done;
// Do some incremental zeroing on rawmem, in parallel with inits. // Do some incremental zeroing on rawmem, in parallel with inits.
zeroes_done = align_size_down(zeroes_done, BytesPerInt); zeroes_done = align_down(zeroes_done, BytesPerInt);
rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr, rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
zeroes_done, zeroes_needed, zeroes_done, zeroes_needed,
phase); phase);
@ -3941,7 +3941,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
assert(st_off >= last_init_end, "tiles do not overwrite inits"); assert(st_off >= last_init_end, "tiles do not overwrite inits");
last_tile_end = MAX2(last_tile_end, next_init_off); last_tile_end = MAX2(last_tile_end, next_init_off);
} else { } else {
intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong); intptr_t st_tile_end = align_up(next_init_off, BytesPerLong);
assert(st_tile_end >= last_tile_end, "inits stay with tiles"); assert(st_tile_end >= last_tile_end, "inits stay with tiles");
assert(st_off >= last_init_end, "inits do not overlap"); assert(st_off >= last_init_end, "inits do not overlap");
last_init_end = next_init_off; // it's a non-tile last_init_end = next_init_off; // it's a non-tile
@ -3954,7 +3954,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
if (!(UseTLAB && ZeroTLAB)) { if (!(UseTLAB && ZeroTLAB)) {
// If anything remains to be zeroed, zero it all now. // If anything remains to be zeroed, zero it all now.
zeroes_done = align_size_down(zeroes_done, BytesPerInt); zeroes_done = align_down(zeroes_done, BytesPerInt);
// if it is the last unused 4 bytes of an instance, forget about it // if it is the last unused 4 bytes of an instance, forget about it
intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint); intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
if (zeroes_done + BytesPerLong >= size_limit) { if (zeroes_done + BytesPerLong >= size_limit) {
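
The store-coalescing code above keeps the incrementally zeroed region tidy by rounding the already-zeroed end down and the required end up to int boundaries, and tile ends up to long boundaries, before clearing. A toy sketch of just that rounding with example offsets; the real code operates on memory graph nodes, not plain integers:

    #include <cstdio>

    const long kBytesPerInt  = 4;
    const long kBytesPerLong = 8;

    static long round_down(long v, long a) { return v - (v % a); }
    static long round_up(long v, long a)   { return round_down(v + a - 1, a); }

    int main() {
      long zeroes_done   = 13;   // example: bytes already known to be zero
      long zeroes_needed = 21;   // example: bytes that must be zero before the next store

      // Widen the gap to whole-int boundaries so the clear covers complete words.
      long clear_from = round_down(zeroes_done, kBytesPerInt);    // 12
      long clear_to   = round_up(zeroes_needed, kBytesPerInt);    // 24

      // Tiles built from coalesced subword stores are tracked at long granularity.
      long tile_end = round_up(clear_to, kBytesPerLong);          // 24

      printf("clear [%ld, %ld), tile end %ld\n", clear_from, clear_to, tile_end);
      return 0;
    }
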

View file

@ -1543,7 +1543,7 @@ WB_ENTRY(jlong, WB_IncMetaspaceCapacityUntilGC(JNIEnv* env, jobject wb, jlong in
} }
size_t new_cap_until_GC = 0; size_t new_cap_until_GC = 0;
size_t aligned_inc = align_size_down((size_t) inc, Metaspace::commit_alignment()); size_t aligned_inc = align_down((size_t) inc, Metaspace::commit_alignment());
bool success = MetaspaceGC::inc_capacity_until_GC(aligned_inc, &new_cap_until_GC); bool success = MetaspaceGC::inc_capacity_until_GC(aligned_inc, &new_cap_until_GC);
if (!success) { if (!success) {
THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(), THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(),

View file

@ -1555,8 +1555,8 @@ void Arguments::set_cms_and_parnew_gc_flags() {
set_parnew_gc_flags(); set_parnew_gc_flags();
size_t max_heap = align_size_down(MaxHeapSize, size_t max_heap = align_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint()); CardTableRS::ct_max_alignment_constraint());
// Now make adjustments for CMS // Now make adjustments for CMS
intx tenuring_default = (intx)6; intx tenuring_default = (intx)6;
@ -1567,7 +1567,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
const size_t preferred_max_new_size_unaligned = const size_t preferred_max_new_size_unaligned =
MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads)); MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
size_t preferred_max_new_size = size_t preferred_max_new_size =
align_size_up(preferred_max_new_size_unaligned, os::vm_page_size()); align_up(preferred_max_new_size_unaligned, os::vm_page_size());
// Unless explicitly requested otherwise, size young gen // Unless explicitly requested otherwise, size young gen
// for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
@ -1681,8 +1681,8 @@ size_t Arguments::max_heap_for_compressed_oops() {
// keeping alignment constraints of the heap. To guarantee the latter, as the // keeping alignment constraints of the heap. To guarantee the latter, as the
// NULL page is located before the heap, we pad the NULL page to the conservative // NULL page is located before the heap, we pad the NULL page to the conservative
// maximum alignment that the GC may ever impose upon the heap. // maximum alignment that the GC may ever impose upon the heap.
size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(), size_t displacement_due_to_null_page = align_up_(os::vm_page_size(),
_conservative_max_heap_alignment); _conservative_max_heap_alignment);
LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page); LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
NOT_LP64(ShouldNotReachHere(); return 0); NOT_LP64(ShouldNotReachHere(); return 0);
@ -2763,7 +2763,7 @@ jint Arguments::parse_xss(const JavaVMOption* option, const char* tail, intx* ou
const julong min_size = min_ThreadStackSize * K; const julong min_size = min_ThreadStackSize * K;
const julong max_size = max_ThreadStackSize * K; const julong max_size = max_ThreadStackSize * K;
assert(is_size_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption"); assert(is_aligned_(max_size, (size_t)os::vm_page_size()), "Implementation assumption");
julong size = 0; julong size = 0;
ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size); ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size);
@ -2778,7 +2778,7 @@ jint Arguments::parse_xss(const JavaVMOption* option, const char* tail, intx* ou
} }
// Internally track ThreadStackSize in units of 1024 bytes. // Internally track ThreadStackSize in units of 1024 bytes.
const julong size_aligned = align_size_up_(size, K); const julong size_aligned = align_up_(size, K);
assert(size <= size_aligned, assert(size <= size_aligned,
"Overflow: " JULONG_FORMAT " " JULONG_FORMAT, "Overflow: " JULONG_FORMAT " " JULONG_FORMAT,
size, size_aligned); size, size_aligned);
@ -2789,7 +2789,7 @@ jint Arguments::parse_xss(const JavaVMOption* option, const char* tail, intx* ou
size_in_K); size_in_K);
// Check that code expanding ThreadStackSize to a page aligned number of bytes won't overflow. // Check that code expanding ThreadStackSize to a page aligned number of bytes won't overflow.
const julong max_expanded = align_size_up_(size_in_K * K, (size_t)os::vm_page_size()); const julong max_expanded = align_up_(size_in_K * K, (size_t)os::vm_page_size());
assert(max_expanded < max_uintx && max_expanded >= size_in_K, assert(max_expanded < max_uintx && max_expanded >= size_in_K,
"Expansion overflowed: " JULONG_FORMAT " " JULONG_FORMAT, "Expansion overflowed: " JULONG_FORMAT " " JULONG_FORMAT,
max_expanded, size_in_K); max_expanded, size_in_K);

View file

@ -153,7 +153,7 @@ inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
jbyte compare_value, cmpxchg_memory_order order) { jbyte compare_value, cmpxchg_memory_order order) {
STATIC_ASSERT(sizeof(jbyte) == 1); STATIC_ASSERT(sizeof(jbyte) == 1);
volatile jint* dest_int = volatile jint* dest_int =
reinterpret_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint))); reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
size_t offset = pointer_delta(dest, dest_int, 1); size_t offset = pointer_delta(dest, dest_int, 1);
jint cur = *dest_int; jint cur = *dest_int;
jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur); jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);

View file

@ -607,7 +607,7 @@ Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
} }
Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) { Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
size_t aligned_max = align_size_down(max_uintx/2, Metaspace::reserve_alignment_words()); size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
if (value > aligned_max) { if (value > aligned_max) {
CommandLineError::print(verbose, CommandLineError::print(verbose,
"InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be " "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
@ -618,7 +618,7 @@ Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool
return Flag::SUCCESS; return Flag::SUCCESS;
} }
// To avoid an overflow by 'align_size_up(value, alignment)'. // To avoid an overflow by 'align_up(value, alignment)'.
static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) { static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1)); size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
if (value > aligned_max) { if (value > aligned_max) {
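
The aligned_max above is a conservative, overflow-safe cap: any flag value at or below it can be passed to align_up(value, alignment) without wrapping the unsigned type. A quick standalone check of that property, using uint64_t as a stand-in for uintx and an assumed 1 MB alignment:

    #include <cstdint>
    #include <cstdio>

    static uint64_t align_up_u64(uint64_t v, uint64_t a) {  // power-of-two 'a' assumed
      return (v + a - 1) & ~(a - 1);
    }

    int main() {
      const uint64_t max_uintx_sketch = UINT64_MAX;
      const uint64_t alignment        = 1u << 20;           // assumed 1 MB alignment

      // The cap from the hunk above: subtract one alignment, then truncate to a boundary.
      uint64_t aligned_max = (max_uintx_sketch - alignment) & ~(alignment - 1);

      printf("%d\n", align_up_u64(aligned_max, alignment) >= aligned_max);  // 1: no wrap at the cap
      printf("%d\n", align_up_u64(max_uintx_sketch - 1, alignment) == 0);   // 1: near the top, round-up wraps to 0
      return 0;
    }
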

Some files were not shown because too many files have changed in this diff.