8151413: os::allocation_granularity/page_size and friends return signed values

Reviewed-by: stefank, ccheung, ysr
Afshin Zafari 2023-02-07 14:08:01 +00:00 committed by Jesper Wilhelmsson
parent 09b8a19597
commit 4fe99da74f
66 changed files with 165 additions and 163 deletions (src/hotspot, test/hotspot/gtest)
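
The whole patch follows one pattern: os::vm_page_size() and os::vm_allocation_granularity() now return size_t instead of int, so call sites drop their old (size_t) casts, while sites that rely on signed arithmetic gain an explicit (int) cast. A minimal stand-alone sketch of the hazard those (int) casts avoid, assuming a 4096-byte page on a 64-bit build purely for illustration (this is not code from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  size_t page_size = 4096;               // assumed value, for illustration only
  size_t wrapped = 7 - page_size;        // unsigned arithmetic wraps: 0xfffffffffffff007
  int    mask    = 7 - (int)page_size;   // signed arithmetic gives -4089, the intended mask
  printf("%zx %d\n", wrapped, mask);
  return 0;
}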

@ -108,7 +108,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// significant 2 bits cleared and page_size is a power of 2
mov(rscratch1, sp);
sub(hdr, hdr, rscratch1);
ands(hdr, hdr, aligned_mask - os::vm_page_size());
ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)
str(hdr, Address(disp_hdr, 0));
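
The recursive-locking test above is pure bit arithmetic over the page size; a hedged stand-alone restatement with an assumed aligned_mask of 3 and a power-of-two page size (not the HotSpot sources):

#include <cstdint>
#include <cstddef>

// Zero result <=> hdr is a 4-byte-aligned address on this thread's stack,
// less than one page above sp, i.e. a recursive lock of the same object.
static bool looks_recursive(uintptr_t hdr, uintptr_t sp, size_t page_size) {
  const intptr_t aligned_mask = 3;       // assumed, per the comment above
  return ((hdr - sp) & (uintptr_t)(aligned_mask - (intptr_t)page_size)) == 0;
}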

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -802,7 +802,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
// copy
mov(rscratch1, sp);
sub(swap_reg, swap_reg, rscratch1);
ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
// Save the test result, for recursive case, the result is zero
str(swap_reg, Address(lock_reg, mark_offset));

@ -4577,9 +4577,9 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// Bang one page at a time because large size can bang beyond yellow and
// red zones.
Label loop;
mov(rscratch1, os::vm_page_size());
mov(rscratch1, (int)os::vm_page_size());
bind(loop);
lea(tmp, Address(tmp, -os::vm_page_size()));
lea(tmp, Address(tmp, -(int)os::vm_page_size()));
subsw(size, size, rscratch1);
str(size, Address(tmp));
br(Assembler::GT, loop);
@ -4590,10 +4590,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
// this could be any sized move but this can be a debugging crumb
// so the bigger the better.
lea(tmp, Address(tmp, -os::vm_page_size()));
lea(tmp, Address(tmp, -(int)os::vm_page_size()));
str(size, Address(tmp));
}
}
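
A hedged plain-C++ sketch of the shadow-zone banging loop emitted above, using an unsigned page size (illustrative only, not the generated assembly):

#include <cstddef>

// Touch one byte per page below sp so every shadow page is mapped (or
// faults now rather than later, deep inside a signal handler).
static void bang_shadow_pages(volatile char* sp, size_t shadow_zone_size, size_t page_size) {
  for (size_t offset = page_size; offset <= shadow_zone_size; offset += page_size) {
    sp[-(ptrdiff_t)offset] = 0;
  }
}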

@ -1798,7 +1798,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ sub(swap_reg, sp, swap_reg);
__ neg(swap_reg, swap_reg);
__ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
__ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
// Save the test result, for recursive case, the result is zero
__ str(swap_reg, Address(lock_reg, mark_word_offset));

@ -655,7 +655,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
const int overhead_size =
-(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
const int page_size = os::vm_page_size();
const size_t page_size = os::vm_page_size();
Label after_frame_check;
@ -1063,7 +1063,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// See more discussion in stackOverflow.hpp.
const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
const int n_shadow_pages = shadow_zone_size / page_size;
#ifdef ASSERT

@ -970,7 +970,7 @@ void MacroAssembler::zero_memory(Register start, Register end, Register tmp) {
void MacroAssembler::arm_stack_overflow_check(int frame_size_in_bytes, Register tmp) {
// Version of AbstractAssembler::generate_stack_overflow_check optimized for ARM
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
sub_slow(tmp, SP, StackOverflow::stack_shadow_zone_size());
strb(R0, Address(tmp));

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -107,7 +107,7 @@ static unsigned int probe_valid_max_address_bit(size_t init_bit, size_t min_bit)
unsigned int max_valid_address_bit = 0;
void* last_allocatable_address = nullptr;
const unsigned int page_size = os::vm_page_size();
const size_t page_size = os::vm_page_size();
for (size_t i = init_bit; i >= min_bit; --i) {
void* base_addr = (void*) (((unsigned long) 1U) << i);

@ -1162,8 +1162,8 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native.
const int page_size = os::vm_page_size();
const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
const size_t page_size = os::vm_page_size();
const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / page_size;
const int start_page = native_call ? n_shadow_pages : 1;
BLOCK_COMMENT("bang_stack_shadow_pages:");
for (int pages = start_page; pages <= n_shadow_pages; pages++) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -96,7 +96,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
sub(hdr, hdr, sp);
mv(t0, aligned_mask - os::vm_page_size());
mv(t0, aligned_mask - (int)os::vm_page_size());
andr(hdr, hdr, t0);
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -831,7 +831,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
// least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg x10 as the result of cmpxchg
sub(swap_reg, swap_reg, sp);
mv(t0, (int64_t)(7 - os::vm_page_size()));
mv(t0, (int64_t)(7 - (int)os::vm_page_size()));
andr(swap_reg, swap_reg, t0);
// Save the test result, for recursive case, the result is zero

@ -1933,7 +1933,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// Bang stack for total size given plus shadow page size.
// Bang one page at a time because large size can bang beyond yellow and
// red zones.
mv(t0, os::vm_page_size());
mv(t0, (int)os::vm_page_size());
Label loop;
bind(loop);
sub(tmp, sp, t0);
@ -1947,10 +1947,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
// this could be any sized move but this can be a debugging crumb
// so the bigger the better.
sub(tmp, tmp, os::vm_page_size());
sub(tmp, tmp, (int)os::vm_page_size());
sd(size, Address(tmp, 0));
}
}

@ -1692,7 +1692,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// NOTE: the oopMark is in swap_reg % 10 as the result of cmpxchg
__ sub(swap_reg, swap_reg, sp);
__ andi(swap_reg, swap_reg, 3 - os::vm_page_size());
__ andi(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
// Save the test result, for recursive case, the result is zero
__ sd(swap_reg, Address(lock_reg, mark_word_offset));

@ -602,7 +602,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
const int overhead_size =
-(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
Label after_frame_check;
@ -889,7 +889,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// See more discussion in stackOverflow.hpp.
const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
const int n_shadow_pages = shadow_zone_size / page_size;
#ifdef ASSERT

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -787,7 +787,7 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_size, Register tmp1) {
Register tmp2 = Z_R1_scratch;
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
NearLabel after_frame_check;
BLOCK_COMMENT("stack_overflow_check {");
@ -2020,7 +2020,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// Bang each page in the shadow zone. We can't assume it's been done for
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native. For native, we only bang the last page.
const int page_size = os::vm_page_size();
const size_t page_size = os::vm_page_size();
const int n_shadow_pages = (int)(StackOverflow::stack_shadow_zone_size()/page_size);
const int start_page_num = native_call ? n_shadow_pages : 1;
for (int pages = start_page_num; pages <= n_shadow_pages; pages++) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -87,7 +87,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
subptr(hdr, rsp);
andptr(hdr, aligned_mask - os::vm_page_size());
andptr(hdr, aligned_mask - (int)os::vm_page_size());
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)
movptr(Address(disp_hdr, 0), hdr);

@ -613,7 +613,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Locked by current thread if difference with current SP is less than one page.
subptr(tmpReg, rsp);
// Next instruction set ZFlag == 1 (Success) if difference is less then one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
} else {
// Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
@ -2784,8 +2784,8 @@ void C2_MacroAssembler::string_indexof(Register str1, Register str2,
// since heaps are aligned and mapped by pages.
assert(os::vm_page_size() < (int)G, "default page should be small");
movl(result, str2); // We need only low 32 bits
andl(result, (os::vm_page_size()-1));
cmpl(result, (os::vm_page_size()-16));
andl(result, ((int)os::vm_page_size()-1));
cmpl(result, ((int)os::vm_page_size()-16));
jccb(Assembler::belowEqual, CHECK_STR);
// Move small strings to stack to allow load 16 bytes into vec.
@ -2814,8 +2814,8 @@ void C2_MacroAssembler::string_indexof(Register str1, Register str2,
// Check cross page boundary.
movl(result, str1); // We need only low 32 bits
andl(result, (os::vm_page_size()-1));
cmpl(result, (os::vm_page_size()-16));
andl(result, ((int)os::vm_page_size()-1));
cmpl(result, ((int)os::vm_page_size()-16));
jccb(Assembler::belowEqual, BIG_STRINGS);
subptr(rsp, 16);
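
The page-boundary checks above decide whether a 16-byte vector load may read past the end of a short string; a hedged stand-alone restatement of that test (illustrative, not the assembler code):

#include <cstdint>
#include <cstddef>

// True if a 16-byte load starting at addr stays within addr's page, so it
// cannot touch a following, possibly unmapped, page.
static bool safe_for_16_byte_load(uintptr_t addr, size_t page_size) {
  return (addr & (page_size - 1)) <= page_size - 16;
}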

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1269,7 +1269,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// least significant bits clear.
// NOTE: the mark is in swap_reg %rax as the result of cmpxchg
subptr(swap_reg, rsp);
andptr(swap_reg, zero_bits - os::vm_page_size());
andptr(swap_reg, zero_bits - (int)os::vm_page_size());
// Save the test result, for recursive case, the result is zero
movptr(Address(lock_reg, mark_offset), swap_reg);

@ -1273,9 +1273,9 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// red zones.
Label loop;
bind(loop);
movl(Address(tmp, (-os::vm_page_size())), size );
subptr(tmp, os::vm_page_size());
subl(size, os::vm_page_size());
movl(Address(tmp, (-(int)os::vm_page_size())), size );
subptr(tmp, (int)os::vm_page_size());
subl(size, (int)os::vm_page_size());
jcc(Assembler::greater, loop);
// Bang down shadow pages too.
@ -1284,10 +1284,10 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down including all pages in the shadow zone.
for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) {
for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
// this could be any sized move but this can be a debugging crumb
// so the bigger the better.
movptr(Address(tmp, (-i*os::vm_page_size())), size );
movptr(Address(tmp, (-i*(int)os::vm_page_size())), size );
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1702,7 +1702,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
__ subptr(swap_reg, rsp);
__ andptr(swap_reg, 3 - os::vm_page_size());
__ andptr(swap_reg, 3 - (int)os::vm_page_size());
// Save the test result, for recursive case, the result is zero
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2172,7 +2172,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
__ subptr(swap_reg, rsp);
__ andptr(swap_reg, 3 - os::vm_page_size());
__ andptr(swap_reg, 3 - (int)os::vm_page_size());
// Save the test result, for recursive case, the result is zero
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);

@ -480,7 +480,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
const int overhead_size =
-(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
Label after_frame_check;
@ -732,7 +732,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// method receiver, so do the banging after locking the receiver.)
const int shadow_zone_size = checked_cast<int>(StackOverflow::stack_shadow_zone_size());
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
const int n_shadow_pages = shadow_zone_size / page_size;
const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);

@ -1772,10 +1772,10 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
assert(is_aligned_to(addr, os::vm_page_size()),
"addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
"addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
p2i(addr), os::vm_page_size());
assert(is_aligned_to(size, os::vm_page_size()),
"size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
"size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
size, os::vm_page_size());
vmembk_t* const vmi = vmembk_find(addr);
@ -1807,10 +1807,10 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
assert(is_aligned_to(addr, os::vm_page_size()),
"addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
"addr " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
p2i(addr), os::vm_page_size());
assert(is_aligned_to(size, os::vm_page_size()),
"size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
"size " PTR_FORMAT " not aligned to vm_page_size (" SIZE_FORMAT ")",
size, os::vm_page_size());
// Dynamically do different things for mmap/shmat.
@ -2215,7 +2215,7 @@ extern "C" {
}
}
static void set_page_size(int page_size) {
static void set_page_size(size_t page_size) {
OSInfo::set_vm_page_size(page_size);
OSInfo::set_vm_allocation_granularity(page_size);
}
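
The assertion strings in this file switch from PTR_FORMAT/%d to SIZE_FORMAT because the printed value is now a size_t, and feeding a size_t to a %d conversion is undefined behavior on LP64. A hedged sketch using the standard %zu specifier, which is roughly what SIZE_FORMAT provides for size_t in HotSpot:

#include <cstddef>
#include <cstdio>

int main() {
  size_t page_size = 4096;               // assumed value, for illustration only
  printf("page size: %zu bytes\n", page_size);
  return 0;
}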

@ -1345,7 +1345,7 @@ void os::print_memory_info(outputStream* st) {
size_t size = sizeof(swap_usage);
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);
st->print(", physical " UINT64_FORMAT "k",
os::physical_memory() >> 10);
@ -1910,10 +1910,10 @@ extern void report_error(char* file_name, int line_no, char* title,
void os::init(void) {
char dummy; // used to get a guess on initial stack address
int page_size = getpagesize();
size_t page_size = (size_t)getpagesize();
OSInfo::set_vm_page_size(page_size);
OSInfo::set_vm_allocation_granularity(page_size);
if (os::vm_page_size() <= 0) {
if (os::vm_page_size() == 0) {
fatal("os_bsd.cpp: os::init: getpagesize() failed (%s)", os::strerror(errno));
}
_page_sizes.add(os::vm_page_size());

@ -803,7 +803,7 @@ static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
//
// The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
// if check is done for precaution.
if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
if (minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN) {
tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
}
}
@ -1108,7 +1108,7 @@ void os::Linux::capture_initial_stack(size_t max_size) {
// lower end of primordial stack; reduce ulimit -s value a little bit
// so we won't install guard page on ld.so's data section.
// But ensure we don't underflow the stack size - allow 1 page spare
if (stack_size >= (size_t)(3 * os::vm_page_size())) {
if (stack_size >= 3 * os::vm_page_size()) {
stack_size -= 2 * os::vm_page_size();
}
@ -2263,7 +2263,7 @@ void os::Linux::print_steal_info(outputStream* st) {
void os::print_memory_info(outputStream* st) {
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
@ -2705,7 +2705,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
if (UseTransparentHugePages && alignment_hint > vm_page_size()) {
// We don't check the return value: madvise(MADV_HUGEPAGE) may not
// be supported or the memory may already be backed by huge pages.
::madvise(addr, bytes, MADV_HUGEPAGE);
@ -2718,7 +2718,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}
@ -3449,7 +3449,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
byte_size_in_exact_unit(page_size),
exact_unit_for_byte_size(page_size));
for (size_t page_size_ = _page_sizes.next_smaller(page_size);
page_size_ != (size_t)os::vm_page_size();
page_size_ != os::vm_page_size();
page_size_ = _page_sizes.next_smaller(page_size_)) {
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
p = mmap(nullptr, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
@ -3919,7 +3919,7 @@ bool os::Linux::commit_memory_special(size_t bytes,
int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;
// For large pages additional flags are required.
if (page_size > (size_t) os::vm_page_size()) {
if (page_size > os::vm_page_size()) {
flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
}
char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);
@ -3949,7 +3949,7 @@ char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
assert(is_aligned(req_addr, page_size), "Must be");
assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
assert(_page_sizes.contains(page_size), "Must be a valid page size");
assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");
assert(page_size > os::vm_page_size(), "Must be a large page size");
assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");
// We only end up here when at least 1 large page can be used.
@ -4279,14 +4279,17 @@ void os::init(void) {
char dummy; // used to get a guess on initial stack address
clock_tics_per_sec = sysconf(_SC_CLK_TCK);
int page_size = sysconf(_SC_PAGESIZE);
OSInfo::set_vm_page_size(page_size);
OSInfo::set_vm_allocation_granularity(page_size);
if (os::vm_page_size() <= 0) {
int sys_pg_size = sysconf(_SC_PAGESIZE);
if (sys_pg_size < 0) {
fatal("os_linux.cpp: os::init: sysconf failed (%s)",
os::strerror(errno));
}
size_t page_size = (size_t) sys_pg_size;
OSInfo::set_vm_page_size(page_size);
OSInfo::set_vm_allocation_granularity(page_size);
if (os::vm_page_size() == 0) {
fatal("os_linux.cpp: os::init: OSInfo::set_vm_page_size failed");
}
_page_sizes.add(os::vm_page_size());
Linux::initialize_system_info();
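
The rewritten os::init() above validates sysconf's signed return value before converting it to size_t, since the old "os::vm_page_size() <= 0" test cannot fire once the type is unsigned. A hedged stand-alone sketch of the same pattern (not the HotSpot function):

#include <unistd.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

int main() {
  long sys_pg_size = sysconf(_SC_PAGESIZE);   // returns -1 on failure
  if (sys_pg_size < 0) {
    perror("sysconf(_SC_PAGESIZE)");
    return EXIT_FAILURE;
  }
  size_t page_size = (size_t)sys_pg_size;     // safe: value is known non-negative
  printf("page size: %zu\n", page_size);
  return 0;
}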

@ -1903,7 +1903,7 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {
void os::print_memory_info(outputStream* st) {
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
st->print(" " SIZE_FORMAT "k page", os::vm_page_size()>>10);
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@ -2502,7 +2502,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
address addr = (address) exception_record->ExceptionInformation[1];
if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
int page_size = os::vm_page_size();
size_t page_size = os::vm_page_size();
// Make sure the pc and the faulting address are sane.
//
@ -3159,7 +3159,7 @@ void os::large_page_init() {
}
_large_page_size = large_page_init_decide_size();
const size_t default_page_size = (size_t) os::vm_page_size();
const size_t default_page_size = os::vm_page_size();
if (_large_page_size > default_page_size) {
_page_sizes.add(_large_page_size);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ inline void os::map_stack_shadow_pages(address sp) {
// If we decrement stack pointer more than one page
// the OS may not map an intervening page into our space
// and may fault on a memory access to interior of our frame.
const int page_size = os::vm_page_size();
const size_t page_size = os::vm_page_size();
const size_t n_pages = StackOverflow::stack_shadow_zone_size() / page_size;
for (size_t pages = 1; pages <= n_pages; pages++) {
sp -= page_size;

@ -555,7 +555,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub == nullptr &&
(sig == SIGSEGV || sig == SIGBUS) &&
uc->context_trapno == trap_page_fault) {
int page_size = os::vm_page_size();
size_t page_size = os::vm_page_size();
address addr = (address) info->si_addr;
address pc = os::Posix::ucontext_get_pc(uc);
// Make sure the pc and the faulting address are sane.

@ -349,7 +349,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub == nullptr &&
(sig == SIGSEGV || sig == SIGBUS) &&
uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
int page_size = os::vm_page_size();
size_t page_size = os::vm_page_size();
address addr = (address) info->si_addr;
address pc = os::Posix::ucontext_get_pc(uc);
// Make sure the pc and the faulting address are sane.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -133,7 +133,7 @@ void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
// The entry code may need to bang additional pages if the framesize
// is greater than a page.
const int page_size = os::vm_page_size();
const int page_size = (int)os::vm_page_size();
int bang_end = (int)StackOverflow::stack_shadow_zone_size();
// This is how far the previous frame's stack banging extended.
@ -246,5 +246,5 @@ bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
// an explicit null check for -1.
// Check if offset is outside of [0, os::vm_page_size()]
return offset < 0 || offset >= os::vm_page_size();
return offset < 0 || offset >= static_cast<intptr_t>(os::vm_page_size());
}
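
The static_cast above keeps the offset comparison signed; mixing the intptr_t offset with an unsigned size_t would otherwise convert the signed operand and draw sign-compare warnings. A hedged stand-alone restatement (not the HotSpot function itself):

#include <cstdint>
#include <cstddef>

// Offset is outside [0, page_size) and therefore needs an explicit check.
static bool offset_needs_explicit_check(intptr_t offset, size_t page_size) {
  return offset < 0 || offset >= static_cast<intptr_t>(page_size);
}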

@ -4668,7 +4668,7 @@ public:
UnsafeConstantsFixup() {
// round up values for all static final fields
_address_size = sizeof(void*);
_page_size = os::vm_page_size();
_page_size = (int)os::vm_page_size();
_big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
_use_unaligned_access = UseUnalignedAccesses;
_data_cache_line_flush_size = (int)VM_Version::data_cache_line_flush_size();

@ -311,7 +311,7 @@ void CodeCache::initialize_heaps() {
// If large page support is enabled, align code heaps according to large
// page size to make sure that code cache is covered by large pages.
const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
const size_t alignment = MAX2(page_size(false, 8), os::vm_allocation_granularity());
non_nmethod_size = align_up(non_nmethod_size, alignment);
profiled_size = align_down(profiled_size, alignment);
non_profiled_size = align_down(non_profiled_size, alignment);
@ -353,7 +353,7 @@ size_t CodeCache::page_size(bool aligned, size_t min_pages) {
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
// Align and reserve space for code cache
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {

@ -68,7 +68,7 @@ void EpsilonArguments::initialize() {
void EpsilonArguments::initialize_alignments() {
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
size_t align = MAX2(os::vm_allocation_granularity(), page_size);
SpaceAlignment = align;
HeapAlignment = align;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t
guarantee(is_aligned(rs.base(), page_size),
"Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
guarantee(is_aligned(used_size, os::vm_page_size()),
"Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
"Given used reserved space size needs to be OS page size aligned (" SIZE_FORMAT " bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
guarantee(used_size <= rs.size(),
"Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
guarantee(is_aligned(rs.size(), page_size),

@ -507,7 +507,7 @@ void MutableNUMASpace::initialize(MemRegion mr,
// Try small pages if the chunk size is too small
if (base_space_size_pages / lgrp_spaces()->length() == 0
&& page_size() > (size_t)os::vm_page_size()) {
&& page_size() > os::vm_page_size()) {
// Changing the page size below can lead to freeing of memory. So we fail initialization.
if (_must_use_large_pages) {
vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");

@ -47,7 +47,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
const size_t used_page_sz = rs.page_size();

@ -445,7 +445,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
const size_t rs_align = page_sz == os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,8 +99,8 @@ void CardTable::initialize() {
_cur_covered_regions = 0;
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
const size_t rs_align = _page_size == os::vm_page_size() ? 0 :
MAX2(_page_size, os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);
MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);
size_t page_size = (size_t)os::vm_page_size();
size_t page_size = os::vm_page_size();
if (!_map_space.special()) {
// Commit entire pages that cover the heap cset map.

@ -178,9 +178,9 @@ jint ShenandoahHeap::initialize() {
_committed = _initial_size;
size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
size_t heap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
//
// Reserve and commit memory for heap

@ -1,4 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -552,12 +553,12 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
// region size to regular page size.
// Figure out page size to use, and aligns up heap to page size
int page_size = os::vm_page_size();
size_t page_size = os::vm_page_size();
if (UseLargePages) {
size_t large_page_size = os::large_page_size();
max_heap_size = align_up(max_heap_size, large_page_size);
if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
page_size = (int)large_page_size;
page_size = large_page_size;
} else {
// Should have been checked during argument initialization
assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ class CompilerToVM {
static CardTable::CardValue* cardtable_start_address;
static int cardtable_shift;
static int vm_page_size;
static size_t vm_page_size;
static int sizeof_vtableEntry;
static int sizeof_ExceptionTableElement;

@ -74,7 +74,7 @@ int CompilerToVM::Data::_fields_annotations_base_offset;
CardTable::CardValue* CompilerToVM::Data::cardtable_start_address;
int CompilerToVM::Data::cardtable_shift;
int CompilerToVM::Data::vm_page_size;
size_t CompilerToVM::Data::vm_page_size;
int CompilerToVM::Data::sizeof_vtableEntry = sizeof(vtableEntry);
int CompilerToVM::Data::sizeof_ExceptionTableElement = sizeof(ExceptionTableElement);

@ -79,7 +79,7 @@
static_field(CompilerToVM::Data, cardtable_start_address, CardTable::CardValue*) \
static_field(CompilerToVM::Data, cardtable_shift, int) \
\
static_field(CompilerToVM::Data, vm_page_size, int) \
static_field(CompilerToVM::Data, vm_page_size, size_t) \
\
static_field(CompilerToVM::Data, sizeof_vtableEntry, int) \
static_field(CompilerToVM::Data, sizeof_ExceptionTableElement, int) \

@ -50,7 +50,7 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
template <class E>
size_t MmapArrayAllocator<E>::size_for(size_t length) {
size_t size = length * sizeof(E);
int alignment = os::vm_allocation_granularity();
size_t alignment = os::vm_allocation_granularity();
return align_up(size, alignment);
}

@ -183,7 +183,7 @@ void CodeHeap::clear() {
static size_t align_to_page_size(size_t size) {
const size_t alignment = (size_t)os::vm_page_size();
const size_t alignment = os::vm_page_size();
assert(is_power_of_2(alignment), "no kidding ???");
return (size + alignment - 1) & ~(alignment - 1);
}
@ -222,7 +222,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_number_of_committed_segments = size_to_segments(_memory.committed_size());
_number_of_reserved_segments = size_to_segments(_memory.reserved_size());
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
const size_t reserved_segments_alignment = MAX2(os::vm_page_size(), granularity);
const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -47,7 +47,7 @@ DEBUG_ONLY(bool Settings::_use_allocation_guard = false;)
void Settings::ergo_initialize() {
if (strcmp(MetaspaceReclaimPolicy, "none") == 0) {
log_info(metaspace)("Initialized with strategy: no reclaim.");
_commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
_commit_granule_bytes = MAX2(os::vm_page_size(), 64 * K);
_commit_granule_words = _commit_granule_bytes / BytesPerWord;
// In "none" reclamation mode, we do not uncommit, and we commit new chunks fully;
// that very closely mimics the behaviour of old Metaspace.
@ -57,13 +57,13 @@ void Settings::ergo_initialize() {
log_info(metaspace)("Initialized with strategy: aggressive reclaim.");
// Set the granule size rather small; may increase
// mapping fragmentation but also increase chance to uncommit.
_commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 16 * K);
_commit_granule_bytes = MAX2(os::vm_page_size(), 16 * K);
_commit_granule_words = _commit_granule_bytes / BytesPerWord;
_new_chunks_are_fully_committed = false;
_uncommit_free_chunks = true;
} else if (strcmp(MetaspaceReclaimPolicy, "balanced") == 0) {
log_info(metaspace)("Initialized with strategy: balanced reclaim.");
_commit_granule_bytes = MAX2((size_t)os::vm_page_size(), 64 * K);
_commit_granule_bytes = MAX2(os::vm_page_size(), 64 * K);
_commit_granule_words = _commit_granule_bytes / BytesPerWord;
_new_chunks_are_fully_committed = false;
_uncommit_free_chunks = true;
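
The (size_t) casts vanish here because a MAX2-style template deduces a single type for both arguments; with the old int return type the cast was required for deduction to succeed. A hedged sketch of that constraint (HotSpot's real MAX2 lives in globalDefinitions.hpp; this is illustrative only):

#include <cstddef>

template <class T>
constexpr T max2(T a, T b) { return a > b ? a : b; }

static size_t commit_granule_bytes(size_t page_size) {
  // Both operands are size_t now, so no cast is needed for deduction.
  return max2(page_size, (size_t)(64 * 1024));
}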

@ -58,7 +58,7 @@ ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_
// and normal pages. If the size is not a multiple of the
// page size it will be aligned up to achieve this.
size_t alignment = os::vm_allocation_granularity();;
if (preferred_page_size != (size_t)os::vm_page_size()) {
if (preferred_page_size != os::vm_page_size()) {
alignment = MAX2(preferred_page_size, alignment);
size = align_up(size, alignment);
}
@ -131,7 +131,7 @@ static bool failed_to_reserve_as_requested(char* base, char* requested_address)
static bool use_explicit_large_pages(size_t page_size) {
return !os::can_commit_large_page_memory() &&
page_size != (size_t) os::vm_page_size();
page_size != os::vm_page_size();
}
static bool large_pages_requested() {
@ -256,12 +256,12 @@ void ReservedSpace::reserve(size_t size,
return;
}
page_size = os::page_sizes().next_smaller(page_size);
} while (page_size > (size_t) os::vm_page_size());
} while (page_size > os::vm_page_size());
// Failed to reserve explicit large pages, do proper logging.
log_on_large_pages_failure(requested_address, size);
// Now fall back to normal reservation.
assert(page_size == (size_t) os::vm_page_size(), "inv");
assert(page_size == os::vm_page_size(), "inv");
}
// == Case 3 ==
@ -284,7 +284,7 @@ void ReservedSpace::initialize(size_t size,
"alignment not aligned to os::vm_allocation_granularity()");
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
"not a power of 2");
assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size");
assert(page_size >= os::vm_page_size(), "Invalid page size");
assert(is_power_of_2(page_size), "Invalid page size");
clear_members();
@ -294,7 +294,7 @@ void ReservedSpace::initialize(size_t size,
}
// Adjust alignment to not be 0.
alignment = MAX2(alignment, (size_t)os::vm_page_size());
alignment = MAX2(alignment, os::vm_page_size());
// Reserve the memory.
reserve(size, alignment, page_size, requested_address, executable);
@ -359,7 +359,7 @@ static size_t noaccess_prefix_size(size_t alignment) {
}
void ReservedHeapSpace::establish_noaccess_prefix() {
assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
assert(_alignment >= os::vm_page_size(), "must be at least page size big");
_noaccess_prefix = noaccess_prefix_size(_alignment);
if (base() && base() + _size > (char *)OopEncodingHeapMax) {
@ -492,7 +492,7 @@ static char** get_attach_addresses_for_disjoint_mode() {
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
"can not allocate compressed oop heap for this size");
guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");
const size_t granularity = os::vm_allocation_granularity();
assert((size & (granularity - 1)) == 0,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
false));
// base() is one page below the heap.
assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - os::vm_page_size()) ||
assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - (intptr_t)os::vm_page_size()) ||
base() == NULL, "invalid value");
assert(shift() == LogMinObjAlignmentInBytes ||
shift() == 0, "invalid value");

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -370,7 +370,7 @@ bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
// guarantee it doesn't happen) so we always need the stack bang in
// a debug VM.
return (C->stub_function() == NULL &&
(C->has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
(C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3
DEBUG_ONLY(|| true)));
}

@ -158,7 +158,7 @@ WB_ENTRY(jint, WB_GetHeapOopSize(JNIEnv* env, jobject o))
WB_END
WB_ENTRY(jint, WB_GetVMPageSize(JNIEnv* env, jobject o))
return os::vm_page_size();
return (jint)os::vm_page_size();
WB_END
WB_ENTRY(jlong, WB_GetVMAllocationGranularity(JNIEnv* env, jobject o))

@ -1479,7 +1479,7 @@ size_t Arguments::max_heap_for_compressed_oops() {
// keeping alignment constraints of the heap. To guarantee the latter, as the
// null page is located before the heap, we pad the null page to the conservative
// maximum alignment that the GC may ever impose upon the heap.
size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(),
size_t displacement_due_to_null_page = align_up(os::vm_page_size(),
_conservative_max_heap_alignment);
LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
@ -1548,7 +1548,7 @@ void Arguments::set_conservative_max_heap_alignment() {
// itself and the maximum page size we may run the VM with.
size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
_conservative_max_heap_alignment = MAX4(heap_alignment,
(size_t)os::vm_allocation_granularity(),
os::vm_allocation_granularity(),
os::max_page_size(),
GCArguments::compute_heap_alignment());
}

@ -272,8 +272,8 @@ public:
}
};
static bool stack_overflow_check(JavaThread* thread, int size, address sp) {
const int page_size = os::vm_page_size();
static bool stack_overflow_check(JavaThread* thread, size_t size, address sp) {
const size_t page_size = os::vm_page_size();
if (size > page_size) {
if (sp - size < thread->stack_overflow_state()->shadow_zone_safe_limit()) {
return false;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ JVMFlag::Error ObjectAlignmentInBytesConstraintFunc(int value, bool verbose) {
if (value >= (intx)os::vm_page_size()) {
JVMFlag::printError(verbose,
"ObjectAlignmentInBytes (%d) must be "
"less than page size (%d)\n",
"less than page size (" SIZE_FORMAT ")\n",
value, os::vm_page_size());
return JVMFlag::VIOLATES_CONSTRAINT;
}

@ -549,7 +549,7 @@ class SignatureChekker : public SignatureIterator {
if (v != 0) {
// v is a "handle" referring to an oop, cast to integral type.
// There shouldn't be any handles in very low memory.
guarantee((size_t)v >= (size_t)os::vm_page_size(),
guarantee((size_t)v >= os::vm_page_size(),
"Bad JNI oop argument %d: " PTR_FORMAT, _pos, v);
// Verify the pointee.
oop vv = resolve_indirect_oop(v, _value_state[_pos]);

@ -370,7 +370,7 @@ class os: AllStatic {
// OS interface to Virtual Memory
// Return the default page size.
static int vm_page_size() { return OSInfo::vm_page_size(); }
static size_t vm_page_size() { return OSInfo::vm_page_size(); }
// The set of page sizes which the VM is allowed to use (may be a subset of
// the page sizes actually available on the platform).
@ -411,7 +411,7 @@ class os: AllStatic {
const char* base,
const size_t size);
static int vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); }
static size_t vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); }
inline static size_t cds_core_region_alignment();

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,6 @@
#include "precompiled.hpp"
#include "runtime/osInfo.hpp"
int OSInfo::_vm_page_size = -1;
int OSInfo::_vm_allocation_granularity = -1;
size_t OSInfo::_vm_page_size = 0;
size_t OSInfo::_vm_allocation_granularity = 0;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,25 +31,23 @@
// Static information about the operating system. Initialized exactly once
// at VM start-up and never changes again.
class OSInfo : AllStatic {
static int _vm_page_size;
static int _vm_allocation_granularity;
static size_t _vm_page_size;
static size_t _vm_allocation_granularity;
public:
// Returns the byte size of a virtual memory page
static int vm_page_size() { return _vm_page_size; }
static size_t vm_page_size() { return _vm_page_size; }
// Returns the size, in bytes, of the granularity with which memory can be reserved using os::reserve_memory().
static int vm_allocation_granularity() { return _vm_allocation_granularity; }
static size_t vm_allocation_granularity() { return _vm_allocation_granularity; }
static void set_vm_page_size(int n) {
assert(_vm_page_size < 0, "init only once");
assert(n > 0, "sanity");
static void set_vm_page_size(size_t n) {
assert(_vm_page_size == 0, "init only once");
_vm_page_size = n;
}
static void set_vm_allocation_granularity(int n) {
assert(_vm_allocation_granularity < 0, "init only once");
assert(n > 0, "sanity");
static void set_vm_allocation_granularity(size_t n) {
assert(_vm_allocation_granularity == 0, "init only once");
_vm_allocation_granularity = n;
}
};
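
The "not yet initialized" sentinel moves from -1 to 0 because a size_t can never be negative: the old "_vm_page_size < 0" condition would be unconditionally false for an unsigned field, so zero takes over as the marker. A hedged stand-alone sketch of the new init-once pattern (not the OSInfo class itself):

#include <cassert>
#include <cstddef>

static size_t g_vm_page_size = 0;        // 0 == not yet initialized

static void set_vm_page_size(size_t n) {
  assert(g_vm_page_size == 0 && "init only once");
  g_vm_page_size = n;
}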

@ -93,12 +93,12 @@ void PerfMemory::initialize() {
// initialization already performed
return;
size_t capacity = align_up(PerfDataMemorySize,
size_t capacity = align_up((size_t)PerfDataMemorySize,
os::vm_allocation_granularity());
log_debug(perf, memops)("PerfDataMemorySize = %d,"
" os::vm_allocation_granularity = %d,"
" adjusted size = " SIZE_FORMAT,
" os::vm_allocation_granularity = " SIZE_FORMAT
", adjusted size = " SIZE_FORMAT,
PerfDataMemorySize,
os::vm_allocation_granularity(),
capacity);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ namespace {
static void test_reserved_size_alignment_page_type(size_t size, size_t alignment, bool maybe_large) {
if (size < alignment) {
// Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
ASSERT_EQ((size_t) os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
ASSERT_EQ(os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
return;
}
@ -402,7 +402,7 @@ class TestReservedSpace : AllStatic {
static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
if (size < alignment) {
// Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
ASSERT_EQ((size_t)os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
ASSERT_EQ(os::vm_page_size(), os::large_page_size()) << "Test needs further refinement";
return;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -201,7 +201,7 @@ TEST_VM_F(ArgumentsTest, parse_xss) {
// Test value aligned both to K and vm_page_size.
{
EXPECT_TRUE(is_aligned(32 * M, K));
EXPECT_TRUE(is_aligned(32 * M, (size_t)os::vm_page_size()));
EXPECT_TRUE(is_aligned(32 * M, os::vm_page_size()));
EXPECT_EQ(parse_xss_inner(to_string(32 * M), JNI_OK), (intx)(32 * M / K));
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -698,16 +698,16 @@ TEST_VM(os, find_mapping_3) {
TEST_VM(os, os_pagesizes) {
ASSERT_EQ(os::min_page_size(), 4 * K);
ASSERT_LE(os::min_page_size(), (size_t)os::vm_page_size());
ASSERT_LE(os::min_page_size(), os::vm_page_size());
// The vm_page_size should be the smallest in the set of allowed page sizes
// (contract says "default" page size but a lot of code actually assumes
// this to be the smallest page size; notable, deliberate exception is
// AIX which can have smaller page sizes but those are not part of the
// page_sizes() set).
ASSERT_EQ(os::page_sizes().smallest(), (size_t)os::vm_page_size());
ASSERT_EQ(os::page_sizes().smallest(), os::vm_page_size());
// The large page size, if it exists, shall be part of the set
if (UseLargePages) {
ASSERT_GT(os::large_page_size(), (size_t)os::vm_page_size());
ASSERT_GT(os::large_page_size(), os::vm_page_size());
ASSERT_TRUE(os::page_sizes().contains(os::large_page_size()));
}
os::page_sizes().print_on(tty);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ static ::testing::AssertionResult testPageAddress(
}
TEST_VM(globalDefinitions, clamp_address_in_page) {
const intptr_t page_sizes[] = {os::vm_page_size(), 4096, 8192, 65536, 2 * 1024 * 1024};
const intptr_t page_sizes[] = {static_cast<intptr_t>(os::vm_page_size()), 4096, 8192, 65536, 2 * 1024 * 1024};
const int num_page_sizes = sizeof(page_sizes) / sizeof(page_sizes[0]);
for (int i = 0; i < num_page_sizes; i++) {