From 4fe99da74f557461c31293cdc48af1199dd2b85c Mon Sep 17 00:00:00 2001
From: Afshin Zafari
Date: Tue, 7 Feb 2023 14:08:01 +0000
Subject: [PATCH] 8151413: os::allocation_granularity/page_size and friends
 return signed values

Reviewed-by: stefank, ccheung, ysr
---
 .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp |  2 +-
 .../cpu/aarch64/interp_masm_aarch64.cpp       |  4 +--
 .../cpu/aarch64/macroAssembler_aarch64.cpp    |  8 ++---
 .../cpu/aarch64/sharedRuntime_aarch64.cpp     |  2 +-
 .../templateInterpreterGenerator_aarch64.cpp  |  4 +--
 src/hotspot/cpu/arm/macroAssembler_arm.cpp    |  2 +-
 src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp |  2 +-
 src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp     |  4 +--
 .../ppc/templateInterpreterGenerator_ppc.cpp  |  4 +--
 .../cpu/riscv/c1_MacroAssembler_riscv.cpp     |  4 +--
 src/hotspot/cpu/riscv/interp_masm_riscv.cpp   |  4 +--
 .../cpu/riscv/macroAssembler_riscv.cpp        |  6 ++--
 src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp |  2 +-
 .../templateInterpreterGenerator_riscv.cpp    |  4 +--
 .../templateInterpreterGenerator_s390.cpp     |  6 ++--
 src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp |  4 +--
 src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp | 10 +++----
 src/hotspot/cpu/x86/interp_masm_x86.cpp       |  4 +--
 src/hotspot/cpu/x86/macroAssembler_x86.cpp    | 10 +++----
 src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp  |  4 +--
 src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp  |  4 +--
 .../x86/templateInterpreterGenerator_x86.cpp  |  4 +--
 src/hotspot/os/aix/os_aix.cpp                 | 10 +++----
 src/hotspot/os/bsd/os_bsd.cpp                 |  6 ++--
 src/hotspot/os/linux/os_linux.cpp             | 29 ++++++++++---------
 src/hotspot/os/windows/os_windows.cpp         |  6 ++--
 src/hotspot/os/windows/os_windows.inline.hpp  |  4 +--
 src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp     |  2 +-
 src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp |  2 +-
 src/hotspot/share/asm/assembler.cpp           |  6 ++--
 src/hotspot/share/classfile/javaClasses.cpp   |  2 +-
 src/hotspot/share/code/codeCache.cpp          |  4 +--
 .../share/gc/epsilon/epsilonArguments.cpp     |  2 +-
 .../share/gc/g1/g1PageBasedVirtualSpace.cpp   |  4 +--
 .../share/gc/parallel/mutableNUMASpace.cpp    |  2 +-
 .../share/gc/parallel/parMarkBitMap.cpp       |  2 +-
 .../share/gc/parallel/psParallelCompact.cpp   |  2 +-
 src/hotspot/share/gc/shared/cardTable.cpp     |  6 ++--
 .../gc/shenandoah/shenandoahCollectionSet.cpp |  4 +--
 .../share/gc/shenandoah/shenandoahHeap.cpp    |  6 ++--
 .../gc/shenandoah/shenandoahHeapRegion.cpp    |  5 ++--
 src/hotspot/share/gc/z/zBarrier.cpp           |  2 +-
 src/hotspot/share/jvmci/jvmciCompilerToVM.hpp |  4 +--
 .../share/jvmci/jvmciCompilerToVMInit.cpp     |  2 +-
 src/hotspot/share/jvmci/vmStructs_jvmci.cpp   |  2 +-
 .../share/memory/allocation.inline.hpp        |  2 +-
 src/hotspot/share/memory/heap.cpp             |  4 +--
 .../memory/metaspace/metaspaceSettings.cpp    |  8 ++---
 src/hotspot/share/memory/virtualspace.cpp     | 16 +++++-----
 src/hotspot/share/oops/compressedOops.cpp     |  4 +--
 src/hotspot/share/opto/output.cpp             |  4 +--
 src/hotspot/share/prims/whitebox.cpp          |  2 +-
 src/hotspot/share/runtime/arguments.cpp       |  4 +--
 .../share/runtime/continuationFreezeThaw.cpp  |  4 +--
 .../flags/jvmFlagConstraintsRuntime.cpp       |  4 +--
 src/hotspot/share/runtime/javaCalls.cpp       |  2 +-
 src/hotspot/share/runtime/os.hpp              |  4 +--
 src/hotspot/share/runtime/osInfo.cpp          |  6 ++--
 src/hotspot/share/runtime/osInfo.hpp          | 20 ++++++-------
 src/hotspot/share/runtime/perfMemory.cpp      |  6 ++--
 .../share/utilities/globalDefinitions.hpp     |  2 +-
 .../gtest/memory/test_virtualspace.cpp        |  6 ++--
 test/hotspot/gtest/runtime/test_arguments.cpp |  4 +--
 test/hotspot/gtest/runtime/test_os.cpp        |  8 ++---
 test/hotspot/gtest/runtime/test_os_linux.cpp  |  2 +-
 .../utilities/test_globalDefinitions.cpp      |  4 +--
 66 files changed, 165 insertions(+), 163 deletions(-)

diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index 3aa7acf71b3..1fd71eb6330 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -108,7 +108,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   // significant 2 bits cleared and page_size is a power of 2
   mov(rscratch1, sp);
   sub(hdr, hdr, rscratch1);
-  ands(hdr, hdr, aligned_mask - os::vm_page_size());
+  ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
   str(hdr, Address(disp_hdr, 0));
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 2857c442582..c6611b89956 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -802,7 +802,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
     // copy
     mov(rscratch1, sp);
     sub(swap_reg, swap_reg, rscratch1);
-    ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
+    ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));

     // Save the test result, for recursive case, the result is zero
     str(swap_reg, Address(lock_reg, mark_offset));
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 76bf1d082ad..3a695606090 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -4577,9 +4577,9 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // Bang one page at a time because large size can bang beyond yellow and
   // red zones.
   Label loop;
-  mov(rscratch1, os::vm_page_size());
+  mov(rscratch1, (int)os::vm_page_size());
   bind(loop);
-  lea(tmp, Address(tmp, -os::vm_page_size()));
+  lea(tmp, Address(tmp, -(int)os::vm_page_size()));
   subsw(size, size, rscratch1);
   str(size, Address(tmp));
   br(Assembler::GT, loop);
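The bang_stack_size hunks above change only the operand types, not the banging scheme itself. For readers following the logic, here is a rough stand-alone C++ sketch of the pattern the assembler emits; the function and its names are illustrative, not HotSpot code:

#include <cstddef>

// Touch every page from `top` down through `size` bytes so the OS maps
// each page now, and a guard page underneath traps before the frame runs.
static void bang_stack(volatile char* top, size_t size, size_t page_size) {
  for (size_t banged = 0; banged < size; banged += page_size) {
    top -= page_size;   // step down one page
    *top = 0;           // the "bang": a store that faults on a guard page
  }
}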
@@ -4590,10 +4590,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
   // the way down to and including i=StackShadowPages.
-  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
+  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
     // this could be any sized move but this can be a debugging crumb
     // so the bigger the better.
-    lea(tmp, Address(tmp, -os::vm_page_size()));
+    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
     str(size, Address(tmp));
   }
 }
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 4ca90e584be..6129077212a 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1798,7 +1798,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ sub(swap_reg, sp, swap_reg);
     __ neg(swap_reg, swap_reg);
-    __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
+    __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

     // Save the test result, for recursive case, the result is zero
     __ str(swap_reg, Address(lock_reg, mark_word_offset));
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index ef271b85088..8b1081ad922 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -655,7 +655,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
   const int overhead_size =
     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

-  const int page_size = os::vm_page_size();
+  const size_t page_size = os::vm_page_size();

   Label after_frame_check;
@@ -1063,7 +1063,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // See more discussion in stackOverflow.hpp.

   const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();
   const int n_shadow_pages = shadow_zone_size / page_size;

 #ifdef ASSERT
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index a45387d8cfe..a922f964d79 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -970,7 +970,7 @@ void MacroAssembler::zero_memory(Register start, Register end, Register tmp) {

 void MacroAssembler::arm_stack_overflow_check(int frame_size_in_bytes, Register tmp) {
   // Version of AbstractAssembler::generate_stack_overflow_check optimized for ARM
-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();

   sub_slow(tmp, SP, StackOverflow::stack_shadow_zone_size());
   strb(R0, Address(tmp));
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 9db62b93a95..2ca16bb9c5a 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
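The shape of the whole patch is visible in these hunks: os::vm_page_size() now returns size_t, so an expression like aligned_mask - os::vm_page_size() silently switches to unsigned arithmetic unless the call site narrows the value back to int. A minimal demonstration of why the explicit (int) matters:

#include <cstdio>
#include <cstddef>

int main() {
  size_t page_size = 4096;             // what os::vm_page_size() now returns
  // Unsigned arithmetic: the subtraction wraps instead of going negative.
  printf("%zu\n", 3 - page_size);      // a huge value, not -4093
  // With the explicit narrowing cast the expression stays signed:
  printf("%d\n", 3 - (int)page_size);  // -4093, the mask the code expects
  return 0;
}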
diff --git a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp
index 1e6c2040490..fbf6d13dc8f 100644
--- a/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/z/zGlobals_ppc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -107,7 +107,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit)
   unsigned int max_valid_address_bit = 0;
   void* last_allocatable_address = nullptr;

-  const unsigned int page_size = os::vm_page_size();
+  const size_t page_size = os::vm_page_size();

   for (size_t i = init_bit; i >= min_bit; --i) {
     void* base_addr = (void*) (((unsigned long) 1U) << i);
diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
index 939b2e3510d..3cb71810897 100644
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
@@ -1162,8 +1162,8 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // Bang each page in the shadow zone. We can't assume it's been done for
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked.  Only true for non-native.
-  const int page_size = os::vm_page_size();
-  const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
+  const size_t page_size = os::vm_page_size();
+  const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
   const int start_page = native_call ? n_shadow_pages : 1;
   BLOCK_COMMENT("bang_stack_shadow_pages:");
   for (int pages = start_page; pages <= n_shadow_pages; pages++) {
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 5989d5ab809..ef61e5a26e4 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -96,7 +96,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
   sub(hdr, hdr, sp);
-  mv(t0, aligned_mask - os::vm_page_size());
+  mv(t0, aligned_mask - (int)os::vm_page_size());
   andr(hdr, hdr, t0);
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index b083e904d5c..06412dd613d 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
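probe_valid_max_address_bit above walks candidate address bits downward and tries a reservation at each one. A rough user-space sketch of that probing idea, using plain Linux-style mmap (illustrative only; the HotSpot version differs in detail):

#include <sys/mman.h>
#include <stddef.h>

// Probe whether an address with bit `bit` set can be reserved: ask mmap
// for a mapping at that address and check whether the kernel honored it.
static int bit_is_mappable(unsigned bit, size_t page_size) {
  void* want = (void*)((size_t)1 << bit);
  void* got = mmap(want, page_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (got == MAP_FAILED) return 0;
  int ok = (got == want);      // the kernel may have placed it elsewhere
  munmap(got, page_size);
  return ok;
}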
@@ -831,7 +831,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
     // least significant 3 bits clear.
     // NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
     sub(swap_reg, swap_reg, sp);
-    mv(t0, (int64_t)(7 - os::vm_page_size()));
+    mv(t0, (int64_t)(7 - (int)os::vm_page_size()));
     andr(swap_reg, swap_reg, t0);

     // Save the test result, for recursive case, the result is zero
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 19b47a5cea6..6954a29e182 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -1933,7 +1933,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // Bang stack for total size given plus shadow page size.
   // Bang one page at a time because large size can bang beyond yellow and
   // red zones.
-  mv(t0, os::vm_page_size());
+  mv(t0, (int)os::vm_page_size());
   Label loop;
   bind(loop);
   sub(tmp, sp, t0);
@@ -1947,10 +1947,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
   // the way down to and including i=StackShadowPages.
-  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
+  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
     // this could be any sized move but this can be a debugging crumb
     // so the bigger the better.
-    sub(tmp, tmp, os::vm_page_size());
+    sub(tmp, tmp, (int)os::vm_page_size());
     sd(size, Address(tmp, 0));
   }
 }
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index dfa0d4b48d5..a61784b0f2d 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -1692,7 +1692,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // NOTE: the oopMark is in swap_reg % 10 as the result of cmpxchg
     __ sub(swap_reg, swap_reg, sp);
-    __ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
+    __ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size());

     // Save the test result, for recursive case, the result is zero
     __ sd(swap_reg, Address(lock_reg, mark_word_offset));
diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index 56aa3b048f4..0ce70328e35 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -602,7 +602,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
   const int overhead_size =
     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();

   Label after_frame_check;
@@ -889,7 +889,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // See more discussion in stackOverflow.hpp.
   const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();
   const int n_shadow_pages = shadow_zone_size / page_size;

 #ifdef ASSERT
diff --git a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp
index 9b5b6f64a39..05983b20b2a 100644
--- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -787,7 +787,7 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)

 void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_size, Register tmp1) {
   Register tmp2 = Z_R1_scratch;
-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();
   NearLabel after_frame_check;

   BLOCK_COMMENT("stack_overflow_check {");
@@ -2020,7 +2020,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // Bang each page in the shadow zone. We can't assume it's been done for
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked.  Only true for non-native.  For native, we only bang the last page.
-  const int page_size = os::vm_page_size();
+  const size_t page_size = os::vm_page_size();
   const int n_shadow_pages = (int)(StackOverflow::stack_shadow_zone_size()/page_size);
   const int start_page_num = native_call ? n_shadow_pages : 1;
   for (int pages = start_page_num; pages <= n_shadow_pages; pages++) {
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 68d089f13e7..7e2807cf20d 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
   subptr(hdr, rsp);
-  andptr(hdr, aligned_mask - os::vm_page_size());
+  andptr(hdr, aligned_mask - (int)os::vm_page_size());
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
   movptr(Address(disp_hdr, 0), hdr);
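lock_object's comment explains the masking trick these hunks keep intact: after subtracting the stack pointer, anding with aligned_mask - page_size yields zero exactly when the displaced header is a stack address within one page of SP, which marks the lock as recursive. A small stand-alone sketch with hypothetical numbers:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t page_size    = 4096;
  const intptr_t aligned_mask = 3;          // low bits known to be zero
  intptr_t sp  = 0x7ffdf000;                // hypothetical stack pointer
  intptr_t hdr = 0x7ffdf7c8;                // hypothetical header on the same page
  intptr_t diff = hdr - sp;                 // 0x7c8, less than one page
  intptr_t mask = aligned_mask - page_size; // two's complement: ...FFFFF003
  printf("%ld\n", (long)(diff & mask));     // 0 => recursive-lock case
  return 0;
}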
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index cebca24bbbe..b508429f0ab 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -613,7 +613,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
     // Locked by current thread if difference with current SP is less than one page.
     subptr(tmpReg, rsp);
     // Next instruction sets ZFlag == 1 (Success) if difference is less than one page.
-    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
     movptr(Address(boxReg, 0), tmpReg);
   } else {
     // Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
@@ -2784,8 +2784,8 @@ void C2_MacroAssembler::string_indexof(Register str1, Register str2,
   // since heaps are aligned and mapped by pages.
   assert(os::vm_page_size() < (int)G, "default page should be small");
   movl(result, str2); // We need only low 32 bits
-  andl(result, (os::vm_page_size()-1));
-  cmpl(result, (os::vm_page_size()-16));
+  andl(result, ((int)os::vm_page_size()-1));
+  cmpl(result, ((int)os::vm_page_size()-16));
   jccb(Assembler::belowEqual, CHECK_STR);

   // Move small strings to stack to allow load 16 bytes into vec.
@@ -2814,8 +2814,8 @@ void C2_MacroAssembler::string_indexof(Register str1, Register str2,
   // Check cross page boundary.
   movl(result, str1); // We need only low 32 bits
-  andl(result, (os::vm_page_size()-1));
-  cmpl(result, (os::vm_page_size()-16));
+  andl(result, ((int)os::vm_page_size()-1));
+  cmpl(result, ((int)os::vm_page_size()-16));
   jccb(Assembler::belowEqual, BIG_STRINGS);

   subptr(rsp, 16);
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp
index 70338f34208..54925be3516 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1269,7 +1269,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
     // least significant bits clear.
     // NOTE: the mark is in swap_reg %rax as the result of cmpxchg
     subptr(swap_reg, rsp);
-    andptr(swap_reg, zero_bits - os::vm_page_size());
+    andptr(swap_reg, zero_bits - (int)os::vm_page_size());

     // Save the test result, for recursive case, the result is zero
     movptr(Address(lock_reg, mark_offset), swap_reg);
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index cc088983740..1cb24d9eb6d 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1273,9 +1273,9 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // red zones.
   Label loop;
   bind(loop);
-  movl(Address(tmp, (-os::vm_page_size())), size );
-  subptr(tmp, os::vm_page_size());
-  subl(size, os::vm_page_size());
+  movl(Address(tmp, (-(int)os::vm_page_size())), size );
+  subptr(tmp, (int)os::vm_page_size());
+  subl(size, (int)os::vm_page_size());
   jcc(Assembler::greater, loop);

   // Bang down shadow pages too.
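The string_indexof hunks above guard 16-byte vector loads with a page-offset comparison: a load is safe only if it cannot run off the end of a mapped page. The same idea as a sketch, assuming a power-of-two page size:

#include <cstdint>
#include <cstddef>

// True if a 16-byte read starting at `addr` would cross into the next page.
static bool crosses_page(const void* addr, size_t page_size) {
  size_t offset = (uintptr_t)addr & (page_size - 1); // offset within the page
  return offset > page_size - 16;                    // read would spill over
}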
@@ -1284,10 +1284,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
   // the way down including all pages in the shadow zone.
-  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) {
+  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
     // this could be any sized move but this can be a debugging crumb
     // so the bigger the better.
-    movptr(Address(tmp, (-i*os::vm_page_size())), size );
+    movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
   }
 }
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index 1f464714e84..7cf7b76af36 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1702,7 +1702,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
     __ subptr(swap_reg, rsp);
-    __ andptr(swap_reg, 3 - os::vm_page_size());
+    __ andptr(swap_reg, 3 - (int)os::vm_page_size());

     // Save the test result, for recursive case, the result is zero
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index 8a4a7aa22b1..d2b4c6a57b9 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2172,7 +2172,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
     __ subptr(swap_reg, rsp);
-    __ andptr(swap_reg, 3 - os::vm_page_size());
+    __ andptr(swap_reg, 3 - (int)os::vm_page_size());

     // Save the test result, for recursive case, the result is zero
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
diff --git a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
index 11ef5a5357d..05bc6edd425 100644
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp
@@ -480,7 +480,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
   const int overhead_size =
     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();

   Label after_frame_check;
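Across the interpreter generators in this patch, the shadow-zone banging reduces to the same arithmetic: the shadow zone is an exact number of pages, and every one of them is touched. A minimal sketch of that page count, assuming a page-aligned zone size:

#include <cstddef>

// The shadow zone is sized as a whole number of pages, so the division
// is exact; each of these pages gets banged before a frame runs.
static int shadow_pages(size_t shadow_zone_size, size_t page_size) {
  return (int)(shadow_zone_size / page_size);
}
// e.g. shadow_pages(20 * 4096, 4096) == 20 pages to bang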
@@ -732,7 +732,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
   // method receiver, so do the banging after locking the receiver.)

   const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();
   const int n_shadow_pages = shadow_zone_size / page_size;

   const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 3d16f0f21f9..a09865c41e9 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -1772,10 +1772,10 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,

 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   assert(is_aligned_to(addr, os::vm_page_size()),
-         "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+         "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
          p2i(addr), os::vm_page_size());
   assert(is_aligned_to(size, os::vm_page_size()),
-         "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+         "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
          size, os::vm_page_size());

   vmembk_t* const vmi = vmembk_find(addr);
@@ -1807,10 +1807,10 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,

 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
   assert(is_aligned_to(addr, os::vm_page_size()),
-         "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+         "addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
          p2i(addr), os::vm_page_size());
   assert(is_aligned_to(size, os::vm_page_size()),
-         "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
+         "size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
          size, os::vm_page_size());

   // Dynamically do different things for mmap/shmat.
@@ -2215,7 +2215,7 @@ extern "C" {
   }
 }

-static void set_page_size(int page_size) {
+static void set_page_size(size_t page_size) {
   OSInfo::set_vm_page_size(page_size);
   OSInfo::set_vm_allocation_granularity(page_size);
 }
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index e9196c0e388..332d9b2ce5f 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1345,7 +1345,7 @@ void os::print_memory_info(outputStream* st) {
   size_t size = sizeof(swap_usage);

   st->print("Memory:");
-  st->print(" %dk page", os::vm_page_size()>>10);
+  st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);

   st->print(", physical " UINT64_FORMAT "k",
             os::physical_memory() >> 10);
@@ -1910,10 +1910,10 @@ extern void report_error(char* file_name, int line_no, char* title,

 void os::init(void) {
   char dummy;   // used to get a guess on initial stack address

-  int page_size = getpagesize();
+  size_t page_size = (size_t)getpagesize();
   OSInfo::set_vm_page_size(page_size);
   OSInfo::set_vm_allocation_granularity(page_size);
-  if (os::vm_page_size() <= 0) {
+  if (os::vm_page_size() == 0) {
     fatal("os_bsd.cpp: os::init: getpagesize() failed (%s)", os::strerror(errno));
   }
   _page_sizes.add(os::vm_page_size());
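The os_aix assertions swap PTR_FORMAT for SIZE_FORMAT, and the print calls drop %d, because the argument is now a size_t. SIZE_FORMAT is HotSpot's portable printf specifier for size_t; the macro below is a stand-in to illustrate the idea, not the real definition:

#include <stdio.h>
#include <stddef.h>

#define MY_SIZE_FORMAT "%zu"   /* stand-in for HotSpot's SIZE_FORMAT */

int main(void) {
  size_t page_size = 4096;
  /* %d would be wrong for size_t on LP64; the macro keeps callers honest */
  printf("page size is " MY_SIZE_FORMAT " bytes\n", page_size);
  return 0;
}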
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 3de725a950f..864c7c3a411 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -803,7 +803,7 @@ static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
       //
       // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
       // if check is done for precaution.
-      if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
+      if (minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN) {
         tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
       }
     }
@@ -1108,7 +1108,7 @@ void os::Linux::capture_initial_stack(size_t max_size) {
     // lower end of primordial stack; reduce ulimit -s value a little bit
     // so we won't install guard page on ld.so's data section.
     // But ensure we don't underflow the stack size - allow 1 page spare
-    if (stack_size >= (size_t)(3 * os::vm_page_size())) {
+    if (stack_size >= 3 * os::vm_page_size()) {
       stack_size -= 2 * os::vm_page_size();
     }

@@ -2263,7 +2263,7 @@ void os::Linux::print_steal_info(outputStream* st) {
 void os::print_memory_info(outputStream* st) {

   st->print("Memory:");
-  st->print(" %dk page", os::vm_page_size()>>10);
+  st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);

   // values in struct sysinfo are "unsigned long"
   struct sysinfo si;
@@ -2705,7 +2705,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
 }

 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2718,7 +2718,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
+  if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
@@ -3449,7 +3449,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
                          byte_size_in_exact_unit(page_size), exact_unit_for_byte_size(page_size));
   for (size_t page_size_ = _page_sizes.next_smaller(page_size);
-       page_size_ != (size_t)os::vm_page_size();
+       page_size_ != os::vm_page_size();
        page_size_ = _page_sizes.next_smaller(page_size_)) {
     flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
     p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
@@ -3919,7 +3919,7 @@ bool os::Linux::commit_memory_special(size_t bytes,
   int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;

   // For large pages additional flags are required.
-  if (page_size > (size_t) os::vm_page_size()) {
+  if (page_size > os::vm_page_size()) {
     flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
   }
   char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
@@ -3949,7 +3949,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
   assert(is_aligned(req_addr, page_size), "Must be");
   assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
   assert(_page_sizes.contains(page_size), "Must be a valid page size");
-  assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");
+  assert(page_size > os::vm_page_size(), "Must be a large page size");
   assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");

   // We only end up here when at least 1 large page can be used.
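pd_realign_memory above deliberately ignores the madvise result, since transparent huge page support is optional. A minimal sketch of that best-effort hint (Linux-specific flag):

#include <sys/mman.h>
#include <stddef.h>

// Advise the kernel that `addr..addr+bytes` would benefit from huge pages.
// Best effort only: older kernels reject MADV_HUGEPAGE, and the range may
// already be backed by huge pages, so the return value is not checked.
static void hint_huge_pages(void* addr, size_t bytes) {
  (void)madvise(addr, bytes, MADV_HUGEPAGE);
}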
@@ -4279,14 +4279,17 @@ void os::init(void) {
   char dummy;   // used to get a guess on initial stack address

   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
-
-  int page_size = sysconf(_SC_PAGESIZE);
-  OSInfo::set_vm_page_size(page_size);
-  OSInfo::set_vm_allocation_granularity(page_size);
-  if (os::vm_page_size() <= 0) {
+  int sys_pg_size = sysconf(_SC_PAGESIZE);
+  if (sys_pg_size < 0) {
     fatal("os_linux.cpp: os::init: sysconf failed (%s)",
           os::strerror(errno));
   }
+  size_t page_size = (size_t) sys_pg_size;
+  OSInfo::set_vm_page_size(page_size);
+  OSInfo::set_vm_allocation_granularity(page_size);
+  if (os::vm_page_size() == 0) {
+    fatal("os_linux.cpp: os::init: OSInfo::set_vm_page_size failed");
+  }
   _page_sizes.add(os::vm_page_size());

   Linux::initialize_system_info();
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 00148c04308..296f819f992 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -1903,7 +1903,7 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {

 void os::print_memory_info(outputStream* st) {
   st->print("Memory:");
-  st->print(" %dk page", os::vm_page_size()>>10);
+  st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);

   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
   // value if total memory is larger than 4GB
@@ -2502,7 +2502,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
     address addr = (address) exception_record->ExceptionInformation[1];

     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
-      int page_size = os::vm_page_size();
+      size_t page_size = os::vm_page_size();

       // Make sure the pc and the faulting address are sane.
       //
@@ -3159,7 +3159,7 @@ void os::large_page_init() {
   }

   _large_page_size = large_page_init_decide_size();
-  const size_t default_page_size = (size_t) os::vm_page_size();
+  const size_t default_page_size = os::vm_page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes.add(_large_page_size);
   }
diff --git a/src/hotspot/os/windows/os_windows.inline.hpp b/src/hotspot/os/windows/os_windows.inline.hpp
index 276863e95b5..176d47ad636 100644
--- a/src/hotspot/os/windows/os_windows.inline.hpp
+++ b/src/hotspot/os/windows/os_windows.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
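The reworked os::init above is the heart of the patch on Linux: the signed sysconf result is range-checked for failure before it is converted to the unsigned size_t, instead of checking the already-converted value. The same pattern in a stand-alone program:

#include <unistd.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  long raw = sysconf(_SC_PAGESIZE);   /* -1 on failure, per POSIX */
  if (raw < 0) {
    perror("sysconf(_SC_PAGESIZE)");
    return EXIT_FAILURE;
  }
  size_t page_size = (size_t)raw;     /* safe: raw is known non-negative */
  printf("%zu\n", page_size);
  return 0;
}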
@@ -49,7 +49,7 @@ inline void os::map_stack_shadow_pages(address sp) {
   // If we decrement stack pointer more than one page
   // the OS may not map an intervening page into our space
   // and may fault on a memory access to interior of our frame.
-  const int page_size = os::vm_page_size();
+  const size_t page_size = os::vm_page_size();
   const size_t n_pages = StackOverflow::stack_shadow_zone_size() / page_size;
   for (size_t pages = 1; pages <= n_pages; pages++) {
     sp -= page_size;
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index 6adbaa88b40..b822df42a46 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -555,7 +555,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       stub == nullptr &&
       (sig == SIGSEGV || sig == SIGBUS) &&
       uc->context_trapno == trap_page_fault) {
-    int page_size = os::vm_page_size();
+    size_t page_size = os::vm_page_size();
     address addr = (address) info->si_addr;
     address pc = os::Posix::ucontext_get_pc(uc);
     // Make sure the pc and the faulting address are sane.
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index b67e6b9a064..90c7d3c938a 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -349,7 +349,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       stub == nullptr &&
       (sig == SIGSEGV || sig == SIGBUS) &&
       uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
-    int page_size = os::vm_page_size();
+    size_t page_size = os::vm_page_size();
     address addr = (address) info->si_addr;
     address pc = os::Posix::ucontext_get_pc(uc);
     // Make sure the pc and the faulting address are sane.
diff --git a/src/hotspot/share/asm/assembler.cpp b/src/hotspot/share/asm/assembler.cpp
index 6339dac5e13..df42c005c32 100644
--- a/src/hotspot/share/asm/assembler.cpp
+++ b/src/hotspot/share/asm/assembler.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,7 +133,7 @@ void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {

   // The entry code may need to bang additional pages if the framesize
   // is greater than a page.

-  const int page_size = os::vm_page_size();
+  const int page_size = (int)os::vm_page_size();
   int bang_end = (int)StackOverflow::stack_shadow_zone_size();

   // This is how far the previous frame's stack banging extended.
@@ -246,5 +246,5 @@ bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
   // an explicit null check for -1.

   // Check if offset is outside of [0, os::vm_page_size()]
-  return offset < 0 || offset >= os::vm_page_size();
+  return offset < 0 || offset >= static_cast<intptr_t>(os::vm_page_size());
 }
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index 664323effec..816a7e3f45f 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -4668,7 +4668,7 @@ public:
   UnsafeConstantsFixup() {
     // round up values for all static final fields
     _address_size = sizeof(void*);
-    _page_size = os::vm_page_size();
+    _page_size = (int)os::vm_page_size();
     _big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
     _use_unaligned_access = UseUnalignedAccesses;
    _data_cache_line_flush_size = (int)VM_Version::data_cache_line_flush_size();
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index c73c3c856db..87487c9d4dd 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -311,7 +311,7 @@ void CodeCache::initialize_heaps() {

   // If large page support is enabled, align code heaps according to large
   // page size to make sure that code cache is covered by large pages.
-  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
+  const size_t alignment = MAX2(page_size(false, 8), os::vm_allocation_granularity());
   non_nmethod_size = align_up(non_nmethod_size, alignment);
   profiled_size    = align_down(profiled_size, alignment);
   non_profiled_size = align_down(non_profiled_size, alignment);
@@ -353,7 +353,7 @@ size_t CodeCache::page_size(bool aligned, size_t min_pages) {

 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
   // Align and reserve space for code cache
   const size_t rs_ps = page_size();
-  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
+  const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
   const size_t rs_size = align_up(size, rs_align);
   ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
   if (!rs.is_reserved()) {
diff --git a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp
index 9196d49e18b..ba023e8799b 100644
--- a/src/hotspot/share/gc/epsilon/epsilonArguments.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonArguments.cpp
@@ -68,7 +68,7 @@ void EpsilonArguments::initialize() {

 void EpsilonArguments::initialize_alignments() {
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
-  size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
+  size_t align = MAX2(os::vm_allocation_granularity(), page_size);
   SpaceAlignment = align;
   HeapAlignment = align;
 }
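needs_explicit_null_check above encodes the implicit-null-check contract: a field access through a null base compiles to a load from address 0 plus the field offset, and the first page of the address space is kept unmapped, so offsets inside [0, page_size) are guaranteed to trap and the signal handler can turn the fault into a NullPointerException. Offsets beyond that page might land in mapped memory and therefore need an explicit compare. As a sketch:

#include <cstdint>
#include <cstddef>

// True when the JIT must emit an explicit null test: the hardware trap on
// the protected zero page only covers offsets within the first page.
static bool needs_explicit_check(intptr_t offset, size_t page_size) {
  return offset < 0 || offset >= (intptr_t)page_size;
}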
diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
index 94f51c8fdd1..60dc48f3cb7 100644
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t
   guarantee(is_aligned(rs.base(), page_size),
             "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
   guarantee(is_aligned(used_size, os::vm_page_size()),
-            "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
+            "Given used reserved space size needs to be OS page size aligned (" SIZE_FORMAT " bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
   guarantee(used_size <= rs.size(),
             "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
   guarantee(is_aligned(rs.size(), page_size),
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
index 7d7eb68dad9..581a353d72c 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
@@ -507,7 +507,7 @@ void MutableNUMASpace::initialize(MemRegion mr,
     // Try small pages if the chunk size is too small
     if (base_space_size_pages / lgrp_spaces()->length() == 0
-        && page_size() > (size_t)os::vm_page_size()) {
+        && page_size() > os::vm_page_size()) {
       // Changing the page size below can lead to freeing of memory. So we fail initialization.
       if (_must_use_large_pages) {
         vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
index 2c5318054bd..b394cb07d35 100644
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
@@ -47,7 +47,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));

-  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
+  const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
   ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
   const size_t used_page_sz = rs.page_size();
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index 2b6a5ee52a0..e346a62d50f 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -445,7 +445,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));

-  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
+  const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
   ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp
index 6970d51cd29..4de1f87b824 100644
--- a/src/hotspot/share/gc/shared/cardTable.cpp
+++ b/src/hotspot/share/gc/shared/cardTable.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,8 +99,8 @@ void CardTable::initialize() {
   _cur_covered_regions = 0;

-  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
-    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
+  const size_t rs_align = _page_size == os::vm_page_size() ? 0 :
+    MAX2(_page_size, os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);

   MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index 5f269a7f64a..4ce0400013a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS

   MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

-  size_t page_size = (size_t)os::vm_page_size();
+  size_t page_size = os::vm_page_size();

   if (!_map_space.special()) {
     // Commit entire pages that cover the heap cset map.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 256f647d0a8..28b8a4aa4a4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -178,9 +178,9 @@ jint ShenandoahHeap::initialize() {

   _committed = _initial_size;

-  size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
-  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
-  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
+  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

   //
   // Reserve and commit memory for heap
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 0e9a50af3c6..e37bbeabab1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
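The rs_align idiom repeated in parMarkBitMap, psParallelCompact and cardTable is worth spelling out: when the chosen page size is just the default OS page, no extra alignment is requested (0); otherwise the reservation is aligned to the larger of page size and allocation granularity. An illustrative helper capturing that choice:

#include <cstddef>
#include <algorithm>

// 0 means "no special alignment needed"; used when reserving card tables,
// mark bitmaps, and similar page-sized auxiliary structures.
static size_t reservation_alignment(size_t page_sz, size_t os_page, size_t granularity) {
  return page_sz == os_page ? 0 : std::max(page_sz, granularity);
}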
@@ -552,12 +553,12 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
   // region size to regular page size.

   // Figure out page size to use, and align up heap to page size
-  int page_size = os::vm_page_size();
+  size_t page_size = os::vm_page_size();
   if (UseLargePages) {
     size_t large_page_size = os::large_page_size();
     max_heap_size = align_up(max_heap_size, large_page_size);
     if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
-      page_size = (int)large_page_size;
+      page_size = large_page_size;
     } else {
       // Should have been checked during argument initialization
       assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
diff --git a/src/hotspot/share/gc/z/zBarrier.cpp b/src/hotspot/share/gc/z/zBarrier.cpp
index 469e5fce935..63730c58b19 100644
--- a/src/hotspot/share/gc/z/zBarrier.cpp
+++ b/src/hotspot/share/gc/z/zBarrier.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp
index 9b6dd072bbd..192e04b2f09 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ class CompilerToVM {
     static CardTable::CardValue* cardtable_start_address;
     static int cardtable_shift;

-    static int vm_page_size;
+    static size_t vm_page_size;

     static int sizeof_vtableEntry;
     static int sizeof_ExceptionTableElement;
diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
index 7bbc7fba978..cb17787e186 100644
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
@@ -74,7 +74,7 @@ int CompilerToVM::Data::_fields_annotations_base_offset;
 CardTable::CardValue* CompilerToVM::Data::cardtable_start_address;
 int CompilerToVM::Data::cardtable_shift;

-int CompilerToVM::Data::vm_page_size;
+size_t CompilerToVM::Data::vm_page_size;

 int CompilerToVM::Data::sizeof_vtableEntry = sizeof(vtableEntry);
 int CompilerToVM::Data::sizeof_ExceptionTableElement = sizeof(ExceptionTableElement);
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
index 0dd582e80cf..bb21ee78fa8 100644
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
@@ -79,7 +79,7 @@
   static_field(CompilerToVM::Data, cardtable_start_address, CardTable::CardValue*) \
   static_field(CompilerToVM::Data, cardtable_shift, int) \
 \
-  static_field(CompilerToVM::Data, vm_page_size, int) \
+  static_field(CompilerToVM::Data, vm_page_size, size_t) \
 \
   static_field(CompilerToVM::Data, sizeof_vtableEntry, int) \
   static_field(CompilerToVM::Data, sizeof_ExceptionTableElement, int) \
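The setup_sizes hunk above shows the large-page decision Shenandoah makes: only use large pages for its data structures if the heap still yields a sane number of regions once the region size is aligned up to a large page. A sketch of that decision, with MIN_NUM_REGIONS as an illustrative stand-in (the real constant lives in shenandoahHeapRegion.hpp) and large_page assumed to be a power of two:

#include <cstddef>

static size_t choose_page_size(size_t max_heap, size_t region_size,
                               size_t small_page, size_t large_page) {
  const size_t MIN_NUM_REGIONS = 8;   // illustrative stand-in
  // align region size up to a large-page multiple
  size_t aligned_region = (region_size + large_page - 1) & ~(large_page - 1);
  return (max_heap / aligned_region >= MIN_NUM_REGIONS) ? large_page : small_page;
}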
diff --git a/src/hotspot/share/memory/allocation.inline.hpp b/src/hotspot/share/memory/allocation.inline.hpp
index 5ba58704856..fa2b2a7a7e3 100644
--- a/src/hotspot/share/memory/allocation.inline.hpp
+++ b/src/hotspot/share/memory/allocation.inline.hpp
@@ -50,7 +50,7 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {

 template <class E>
 size_t MmapArrayAllocator<E>::size_for(size_t length) {
   size_t size = length * sizeof(E);
-  int alignment = os::vm_allocation_granularity();
+  size_t alignment = os::vm_allocation_granularity();
   return align_up(size, alignment);
 }
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
index f9ff1dc318b..c2fa593ea92 100644
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -183,7 +183,7 @@ void CodeHeap::clear() {

 static size_t align_to_page_size(size_t size) {
-  const size_t alignment = (size_t)os::vm_page_size();
+  const size_t alignment = os::vm_page_size();
   assert(is_power_of_2(alignment), "no kidding ???");
   return (size + alignment - 1) & ~(alignment - 1);
 }
@@ -222,7 +222,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
   _number_of_committed_segments = size_to_segments(_memory.committed_size());
   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
-  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
+  const size_t reserved_segments_alignment = MAX2(os::vm_page_size(), granularity);
   const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
diff --git a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
index 68000695571..9e7035d73b4 100644
--- a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
+++ b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -47,7 +47,7 @@ DEBUG_ONLY(bool Settings::_use_allocation_guard = false;)
 void Settings::ergo_initialize() {
   if (strcmp(MetaspaceReclaimPolicy, "none") == 0) {
     log_info(metaspace)("Initialized with strategy: no reclaim.");
-    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
+    _commit_granule_bytes = MAX2(os::vm_page_size(), 64 * K);
     _commit_granule_words = _commit_granule_bytes / BytesPerWord;
     // In "none" reclamation mode, we do not uncommit, and we commit new chunks fully;
     // that very closely mimics the behaviour of old Metaspace.
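align_to_page_size above uses the classic power-of-two round-up, (size + alignment - 1) & ~(alignment - 1), which is also what HotSpot's align_up boils down to for power-of-two alignments. A quick stand-alone demonstration:

#include <cstdio>
#include <cstddef>

// Round `size` up to the next multiple of `alignment` (a power of two).
static size_t align_up_pow2(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  printf("%zu\n", align_up_pow2(1, 4096));      // 4096
  printf("%zu\n", align_up_pow2(4096, 4096));   // 4096 (already aligned)
  printf("%zu\n", align_up_pow2(4097, 4096));   // 8192
  return 0;
}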
@@ -57,13 +57,13 @@ void Settings::ergo_initialize() {
     log_info(metaspace)("Initialized with strategy: aggressive reclaim.");
     // Set the granule size rather small; may increase
     // mapping fragmentation but also increase chance to uncommit.
-    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 16 * K);
+    _commit_granule_bytes = MAX2(os::vm_page_size(), 16 * K);
     _commit_granule_words = _commit_granule_bytes / BytesPerWord;
     _new_chunks_are_fully_committed = false;
     _uncommit_free_chunks = true;
   } else if (strcmp(MetaspaceReclaimPolicy, "balanced") == 0) {
     log_info(metaspace)("Initialized with strategy: balanced reclaim.");
-    _commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
+    _commit_granule_bytes = MAX2(os::vm_page_size(), 64 * K);
     _commit_granule_words = _commit_granule_bytes / BytesPerWord;
     _new_chunks_are_fully_committed = false;
     _uncommit_free_chunks = true;
diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp
index 2e3c17426ef..de585cd0e11 100644
--- a/src/hotspot/share/memory/virtualspace.cpp
+++ b/src/hotspot/share/memory/virtualspace.cpp
@@ -58,7 +58,7 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_
   // and normal pages. If the size is not a multiple of the
   // page size it will be aligned up to achieve this.
   size_t alignment = os::vm_allocation_granularity();;
-  if (preferred_page_size != (size_t)os::vm_page_size()) {
+  if (preferred_page_size != os::vm_page_size()) {
     alignment = MAX2(preferred_page_size, alignment);
     size = align_up(size, alignment);
   }
@@ -131,7 +131,7 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address)

 static bool use_explicit_large_pages(size_t page_size) {
   return !os::can_commit_large_page_memory() &&
-         page_size != (size_t) os::vm_page_size();
+         page_size != os::vm_page_size();
 }

 static bool large_pages_requested() {
@@ -256,12 +256,12 @@ void ReservedSpace::reserve(size_t size,
       return;
     }
     page_size = os::page_sizes().next_smaller(page_size);
-  } while (page_size > (size_t) os::vm_page_size());
+  } while (page_size > os::vm_page_size());

   // Failed to reserve explicit large pages, do proper logging.
   log_on_large_pages_failure(requested_address, size);
   // Now fall back to normal reservation.
-  assert(page_size == (size_t) os::vm_page_size(), "inv");
+  assert(page_size == os::vm_page_size(), "inv");

   // == Case 3 ==
@@ -284,7 +284,7 @@ void ReservedSpace::initialize(size_t size,
          "alignment not aligned to os::vm_allocation_granularity()");
   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
          "not a power of 2");
-  assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size");
+  assert(page_size >= os::vm_page_size(), "Invalid page size");
   assert(is_power_of_2(page_size), "Invalid page size");

   clear_members();
@@ -294,7 +294,7 @@ void ReservedSpace::initialize(size_t size,
   }

   // Adjust alignment to not be 0.
-  alignment = MAX2(alignment, (size_t)os::vm_page_size());
+  alignment = MAX2(alignment, os::vm_page_size());

   // Reserve the memory.
  reserve(size, alignment, page_size, requested_address, executable);
@@ -359,7 +359,7 @@ static size_t noaccess_prefix_size(size_t alignment) {
 }
 
 void ReservedHeapSpace::establish_noaccess_prefix() {
-  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
+  assert(_alignment >= os::vm_page_size(), "must be at least page size big");
   _noaccess_prefix = noaccess_prefix_size(_alignment);
 
   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
@@ -492,7 +492,7 @@ static char** get_attach_addresses_for_disjoint_mode() {
 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
             "can not allocate compressed oop heap for this size");
-  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
+  guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");
 
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & (granularity - 1)) == 0,
diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp
index 659f6169337..a488df00b0e 100644
--- a/src/hotspot/share/oops/compressedOops.cpp
+++ b/src/hotspot/share/oops/compressedOops.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
                           false));
 
   // base() is one page below the heap.
-  assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - os::vm_page_size()) ||
+  assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - (intptr_t)os::vm_page_size()) ||
          base() == NULL, "invalid value");
   assert(shift() == LogMinObjAlignmentInBytes || shift() == 0, "invalid value");
diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp
index 0b5cc251bbd..7ee4000efde 100644
--- a/src/hotspot/share/opto/output.cpp
+++ b/src/hotspot/share/opto/output.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -370,7 +370,7 @@ bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
   // guarantee it doesn't happen) so we always need the stack bang in
   // a debug VM.
   return (C->stub_function() == NULL &&
-          (C->has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
+          (C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
           DEBUG_ONLY(|| true)));
 }
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 64db26d1223..588c2d181ae 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -158,7 +158,7 @@ WB_ENTRY(jint, WB_GetHeapOopSize(JNIEnv* env, jobject o))
 WB_END
 
 WB_ENTRY(jint, WB_GetVMPageSize(JNIEnv* env, jobject o))
-  return os::vm_page_size();
+  return (jint)os::vm_page_size();
 WB_END
 
 WB_ENTRY(jlong, WB_GetVMAllocationGranularity(JNIEnv* env, jobject o))
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 0323bb1397d..5950e7fe6e3 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -1479,7 +1479,7 @@ size_t Arguments::max_heap_for_compressed_oops() {
   // keeping alignment constraints of the heap. To guarantee the latter, as the
   // null page is located before the heap, we pad the null page to the conservative
   // maximum alignment that the GC may ever impose upon the heap.
-  size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(),
+  size_t displacement_due_to_null_page = align_up(os::vm_page_size(),
                                                   _conservative_max_heap_alignment);
 
   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
@@ -1548,7 +1548,7 @@ void Arguments::set_conservative_max_heap_alignment() {
   // itself and the maximum page size we may run the VM with.
   size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
   _conservative_max_heap_alignment = MAX4(heap_alignment,
-                                          (size_t)os::vm_allocation_granularity(),
+                                          os::vm_allocation_granularity(),
                                           os::max_page_size(),
                                           GCArguments::compute_heap_alignment());
 }
diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp
index 0f05307862b..b09a0090644 100644
--- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp
+++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp
@@ -272,8 +272,8 @@ public:
   }
 };
 
-static bool stack_overflow_check(JavaThread* thread, int size, address sp) {
-  const int page_size = os::vm_page_size();
+static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
+  const size_t page_size = os::vm_page_size();
   if (size > page_size) {
     if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
       return false;
diff --git a/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp
index 78dc81fda67..0a58c559cba 100644
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@ JVMFlag::Error ObjectAlignmentInBytesConstraintFunc(int value, bool verbose) {
   if (value >= (intx)os::vm_page_size()) {
     JVMFlag::printError(verbose,
                         "ObjectAlignmentInBytes (%d) must be "
-                        "less than page size (%d)\n",
+                        "less than page size (" SIZE_FORMAT ")\n",
                         value, os::vm_page_size());
     return JVMFlag::VIOLATES_CONSTRAINT;
   }
diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp
index 7245a36dc74..0ae0d4540e4 100644
--- a/src/hotspot/share/runtime/javaCalls.cpp
+++ b/src/hotspot/share/runtime/javaCalls.cpp
@@ -549,7 +549,7 @@ class SignatureChekker : public SignatureIterator {
     if (v != 0) {
       // v is a "handle" referring to an oop, cast to integral type.
       // There shouldn't be any handles in very low memory.
-      guarantee((size_t)v >= (size_t)os::vm_page_size(),
+      guarantee((size_t)v >= os::vm_page_size(),
                 "Bad JNI oop argument %d: " PTR_FORMAT, _pos, v);
       // Verify the pointee.
       oop vv = resolve_indirect_oop(v, _value_state[_pos]);
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index 85e8942a528..231deb2a6d2 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -370,7 +370,7 @@ class os: AllStatic {
   // OS interface to Virtual Memory
 
   // Return the default page size.
-  static int vm_page_size() { return OSInfo::vm_page_size(); }
+  static size_t vm_page_size() { return OSInfo::vm_page_size(); }
 
   // The set of page sizes which the VM is allowed to use (may be a subset of
   // the page sizes actually available on the platform).
@@ -411,7 +411,7 @@ class os: AllStatic {
                                const char* base,
                                const size_t size);
 
-  static int vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); }
+  static size_t vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); }
 
   inline static size_t cds_core_region_alignment();
diff --git a/src/hotspot/share/runtime/osInfo.cpp b/src/hotspot/share/runtime/osInfo.cpp
index bd22344e749..c9fa01bcbf8 100644
--- a/src/hotspot/share/runtime/osInfo.cpp
+++ b/src/hotspot/share/runtime/osInfo.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,6 @@
 #include "precompiled.hpp"
 #include "runtime/osInfo.hpp"
 
-int OSInfo::_vm_page_size = -1;
-int OSInfo::_vm_allocation_granularity = -1;
+size_t OSInfo::_vm_page_size = 0;
+size_t OSInfo::_vm_allocation_granularity = 0;
diff --git a/src/hotspot/share/runtime/osInfo.hpp b/src/hotspot/share/runtime/osInfo.hpp
index 3129b9ae073..7b3a3e92a85 100644
--- a/src/hotspot/share/runtime/osInfo.hpp
+++ b/src/hotspot/share/runtime/osInfo.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,25 +31,23 @@
 // Static information about the operating system. Initialized exactly once
 // at VM start-up and never changes again.
 class OSInfo : AllStatic {
-  static int _vm_page_size;
-  static int _vm_allocation_granularity;
+  static size_t _vm_page_size;
+  static size_t _vm_allocation_granularity;
 
 public:
   // Returns the byte size of a virtual memory page
-  static int vm_page_size() { return _vm_page_size; }
+  static size_t vm_page_size() { return _vm_page_size; }
 
   // Returns the size, in bytes, of the granularity with which memory can be reserved using os::reserve_memory().
-  static int vm_allocation_granularity() { return _vm_allocation_granularity; }
+  static size_t vm_allocation_granularity() { return _vm_allocation_granularity; }
 
-  static void set_vm_page_size(int n) {
-    assert(_vm_page_size < 0, "init only once");
-    assert(n > 0, "sanity");
+  static void set_vm_page_size(size_t n) {
+    assert(_vm_page_size == 0, "init only once");
     _vm_page_size = n;
   }
 
-  static void set_vm_allocation_granularity(int n) {
-    assert(_vm_allocation_granularity < 0, "init only once");
-    assert(n > 0, "sanity");
+  static void set_vm_allocation_granularity(size_t n) {
+    assert(_vm_allocation_granularity == 0, "init only once");
     _vm_allocation_granularity = n;
   }
 };
diff --git a/src/hotspot/share/runtime/perfMemory.cpp b/src/hotspot/share/runtime/perfMemory.cpp
index 9f6cee47826..83604b6a5b5 100644
--- a/src/hotspot/share/runtime/perfMemory.cpp
+++ b/src/hotspot/share/runtime/perfMemory.cpp
@@ -93,12 +93,12 @@ void PerfMemory::initialize() {
     // initialization already performed
     return;
 
-  size_t capacity = align_up(PerfDataMemorySize,
+  size_t capacity = align_up((size_t)PerfDataMemorySize,
                              os::vm_allocation_granularity());
 
   log_debug(perf, memops)("PerfDataMemorySize = %d,"
-                          " os::vm_allocation_granularity = %d,"
-                          " adjusted size = " SIZE_FORMAT,
+                          " os::vm_allocation_granularity = " SIZE_FORMAT
+                          ", adjusted size = " SIZE_FORMAT,
                           PerfDataMemorySize,
                           os::vm_allocation_granularity(),
                           capacity);
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 4f6e8bb191f..11cac97fff1 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
diff --git a/test/hotspot/gtest/memory/test_virtualspace.cpp b/test/hotspot/gtest/memory/test_virtualspace.cpp
index e64a822708d..2391ff00df4 100644
--- a/test/hotspot/gtest/memory/test_virtualspace.cpp
+++ b/test/hotspot/gtest/memory/test_virtualspace.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@ namespace {
   static void test_reserved_size_alignment_page_type(size_t size, size_t alignment, bool maybe_large) {
     if (size < alignment) {
       // Tests might set -XX:LargePageSizeInBytes= and cause unexpected input arguments for this test.
-      ASSERT_EQ((size_t) os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
+      ASSERT_EQ(os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
       return;
     }
@@ -402,7 +402,7 @@ class TestReservedSpace : AllStatic {
   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
     if (size < alignment) {
       // Tests might set -XX:LargePageSizeInBytes= and cause unexpected input arguments for this test.
-      ASSERT_EQ((size_t)os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
+      ASSERT_EQ(os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
       return;
     }
diff --git a/test/hotspot/gtest/runtime/test_arguments.cpp b/test/hotspot/gtest/runtime/test_arguments.cpp
index f92770b9afa..8d312d4c913 100644
--- a/test/hotspot/gtest/runtime/test_arguments.cpp
+++ b/test/hotspot/gtest/runtime/test_arguments.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -201,7 +201,7 @@ TEST_VM_F(ArgumentsTest, parse_xss) {
   // Test value aligned both to K and vm_page_size.
   {
     EXPECT_TRUE(is_aligned(32 * M, K));
-    EXPECT_TRUE(is_aligned(32 * M, (size_t)os::vm_page_size()));
+    EXPECT_TRUE(is_aligned(32 * M, os::vm_page_size()));
     EXPECT_EQ(parse_xss_inner(to_string(32 * M), JNI_OK), (intx)(32 * M / K));
   }
diff --git a/test/hotspot/gtest/runtime/test_os.cpp b/test/hotspot/gtest/runtime/test_os.cpp
index b404fc30ceb..bad74fdf60b 100644
--- a/test/hotspot/gtest/runtime/test_os.cpp
+++ b/test/hotspot/gtest/runtime/test_os.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -698,16 +698,16 @@ TEST_VM(os, find_mapping_3) {
 TEST_VM(os, os_pagesizes) {
   ASSERT_EQ(os::min_page_size(), 4 * K);
-  ASSERT_LE(os::min_page_size(), (size_t)os::vm_page_size());
+  ASSERT_LE(os::min_page_size(), os::vm_page_size());
   // The vm_page_size should be the smallest in the set of allowed page sizes
   // (contract says "default" page size but a lot of code actually assumes
   // this to be the smallest page size; notable, deliberate exception is
   // AIX which can have smaller page sizes but those are not part of the
   // page_sizes() set).
-  ASSERT_EQ(os::page_sizes().smallest(), (size_t)os::vm_page_size());
+  ASSERT_EQ(os::page_sizes().smallest(), os::vm_page_size());
   // The large page size, if it exists, shall be part of the set
   if (UseLargePages) {
-    ASSERT_GT(os::large_page_size(), (size_t)os::vm_page_size());
+    ASSERT_GT(os::large_page_size(), os::vm_page_size());
     ASSERT_TRUE(os::page_sizes().contains(os::large_page_size()));
   }
   os::page_sizes().print_on(tty);
diff --git a/test/hotspot/gtest/runtime/test_os_linux.cpp b/test/hotspot/gtest/runtime/test_os_linux.cpp
index 5d194a37d18..b1826276e00 100644
--- a/test/hotspot/gtest/runtime/test_os_linux.cpp
+++ b/test/hotspot/gtest/runtime/test_os_linux.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
diff --git a/test/hotspot/gtest/utilities/test_globalDefinitions.cpp b/test/hotspot/gtest/utilities/test_globalDefinitions.cpp
index 783bb6d326a..dfaa4a31a29 100644
--- a/test/hotspot/gtest/utilities/test_globalDefinitions.cpp
+++ b/test/hotspot/gtest/utilities/test_globalDefinitions.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ static ::testing::AssertionResult testPageAddress(
 }
 
 TEST_VM(globalDefinitions, clamp_address_in_page) {
-  const intptr_t page_sizes[] = {os::vm_page_size(), 4096, 8192, 65536, 2 * 1024 * 1024};
+  const intptr_t page_sizes[] = {static_cast<intptr_t>(os::vm_page_size()), 4096, 8192, 65536, 2 * 1024 * 1024};
   const int num_page_sizes = sizeof(page_sizes) / sizeof(page_sizes[0]);
 
   for (int i = 0; i < num_page_sizes; i++) {