8080775: Better argument formatting for assert() and friends
Reviewed-by: kbarrett, pliden
commit 1e71f67736 (parent aa0818a98a)
hotspot
  make/linux/makefiles
  src
    cpu
      aarch64/vm
      ppc/vm
      sparc/vm
      x86/vm
    os
      aix/vm
      bsd/vm
      linux/vm
      posix/vm
      solaris/vm
      windows/vm
    os_cpu
      aix_ppc/vm
      bsd_x86/vm
      bsd_zero/vm
      linux_aarch64/vm
      linux_ppc/vm
      linux_sparc/vm
      linux_x86/vm
      linux_zero/vm
      solaris_sparc/vm
      solaris_x86/vm
    share/vm
      asm
      c1
      ci
      classfile
        altHashing.cpp, classFileParser.cpp, classLoaderData.cpp, metadataOnStackMark.cpp, stringTable.cpp, systemDictionary.cpp
      code
      compiler
      gc
        cms
          adaptiveFreeList.cpp, allocationStats.hpp, compactibleFreeListSpace.cpp, concurrentMarkSweepGeneration.cpp, concurrentMarkSweepGeneration.hpp, parCardTableModRefBS.cpp, promotionInfo.hpp
        g1
          bufferingOopClosure.cpp, collectionSetChooser.cpp, collectionSetChooser.hpp, concurrentMark.cpp, concurrentMark.inline.hpp, g1AllocRegion.cpp, g1AllocRegion.hpp, g1AllocRegion.inline.hpp, g1Allocator.cpp, g1Allocator.hpp, g1Allocator.inline.hpp, g1BiasedArray.cpp, g1BiasedArray.hpp, g1BlockOffsetTable.cpp, g1BlockOffsetTable.hpp, g1BlockOffsetTable.inline.hpp, g1CardCounts.cpp, g1CardCounts.hpp, g1CodeCacheRemSet.cpp, g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1CollectedHeap.inline.hpp, g1CollectorPolicy.cpp, g1CollectorPolicy.hpp, g1EvacFailure.cpp, g1EvacStats.cpp, g1GCPhaseTimes.cpp
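Note on the pattern in the hunks below: assert(), guarantee(), fatal(), and vm_exit_out_of_memory() now take a printf-style format string plus arguments directly, so the err_msg()/err_msg_res() wrappers that pre-formatted the message into a temporary buffer are dropped at each call site. The following is a minimal, self-contained sketch of that calling convention, assuming nothing beyond standard C++; report_failure and my_assert are illustrative names only, not HotSpot's actual implementation.

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for HotSpot's error-reporting back end; it only
    // illustrates forwarding a format string and variadic arguments.
    static void report_failure(const char* file, int line, const char* expr,
                               const char* fmt, ...) {
      va_list ap;
      va_start(ap, fmt);
      std::fprintf(stderr, "assert(%s) failed at %s:%d: ", expr, file, line);
      std::vfprintf(stderr, fmt, ap);
      std::fputc('\n', stderr);
      va_end(ap);
      std::abort();
    }

    // Old style: the message had to be pre-formatted first, e.g.
    //   assert(x != 0, err_msg("%s should be nonzero", xname));
    // New style: format string and arguments are passed straight through,
    // so no temporary buffer is built at the call site.
    #define my_assert(cond, ...)                                     \
      do {                                                           \
        if (!(cond)) {                                               \
          report_failure(__FILE__, __LINE__, #cond, __VA_ARGS__);    \
        }                                                            \
      } while (0)

    int main() {
      const char* xname = "x";
      int x = 1;
      my_assert(x != 0, "%s should be nonzero", xname);  // holds, prints nothing
      return 0;
    }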
@@ -207,7 +207,7 @@ ifeq ($(USE_CLANG), true)
 WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif

-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type -Woverloaded-virtual
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type -Woverloaded-virtual -Wno-format-zero-length

 ifeq ($(USE_CLANG),)
 # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -2238,7 +2238,7 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
 ttyLocker ttyl;
 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
 msg);
-assert(false, err_msg("DEBUG MESSAGE: %s", msg));
+assert(false, "DEBUG MESSAGE: %s", msg);
 }
 }
@@ -50,7 +50,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_

 #ifdef ASSERT
 static int check_nonzero(const char* xname, int x) {
-assert(x != 0, err_msg("%s should be nonzero", xname));
+assert(x != 0, "%s should be nonzero", xname);
 return x;
 }
 #define NONZERO(x) check_nonzero(#x, x)
@@ -407,7 +407,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
 }

 default:
-fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 break;
 }
@@ -1194,7 +1194,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
 } else if (iid == vmIntrinsics::_invokeBasic) {
 has_receiver = true;
 } else {
-fatal(err_msg_res("unexpected intrinsic id %d", iid));
+fatal("unexpected intrinsic id %d", iid);
 }

 if (member_reg != noreg) {
@@ -4292,7 +4292,7 @@ const char* stop_types[] = {

 static void stop_on_request(int tp, const char* msg) {
 tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
-guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
+guarantee(false, "PPC assembly code requires stop: %s", msg);
 }

 // Call a C-function that prints output.
@@ -60,7 +60,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_

 #ifdef ASSERT
 static int check_nonzero(const char* xname, int x) {
-assert(x != 0, err_msg("%s should be nonzero", xname));
+assert(x != 0, "%s should be nonzero", xname);
 return x;
 }
 #define NONZERO(x) check_nonzero(#x, x)
@@ -434,7 +434,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
 }

 default:
-fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
 break;
 }
@@ -149,7 +149,7 @@ void NativeCall::verify() {
 if (!NativeCall::is_call_at(addr)) {
 tty->print_cr("not a NativeCall at " PTR_FORMAT, p2i(addr));
 // TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
-fatal(err_msg("not a NativeCall at " PTR_FORMAT, p2i(addr)));
+fatal("not a NativeCall at " PTR_FORMAT, p2i(addr));
 }
 }
 #endif // ASSERT
@@ -162,7 +162,7 @@ void NativeFarCall::verify() {
 if (!NativeFarCall::is_far_call_at(addr)) {
 tty->print_cr("not a NativeFarCall at " PTR_FORMAT, p2i(addr));
 // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, p2i(addr)));
+fatal("not a NativeFarCall at " PTR_FORMAT, p2i(addr));
 }
 }
 #endif // ASSERT
@@ -308,7 +308,7 @@ void NativeMovConstReg::verify() {
 ! MacroAssembler::is_bl(*((int*) addr))) {
 tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
 // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr)));
+fatal("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
 }
 }
 }
@@ -346,7 +346,7 @@ void NativeJump::verify() {
 if (!NativeJump::is_jump_at(addr)) {
 tty->print_cr("not a NativeJump at " PTR_FORMAT, p2i(addr));
 // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-fatal(err_msg("not a NativeJump at " PTR_FORMAT, p2i(addr)));
+fatal("not a NativeJump at " PTR_FORMAT, p2i(addr));
 }
 }
 #endif // ASSERT
@@ -475,9 +475,8 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_siz

 // Is vector's size (in bytes) bigger than a size saved by default?
 bool SharedRuntime::is_wide_vector(int size) {
-ResourceMark rm;
 // Note, MaxVectorSize == 8 on PPC64.
-assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
+assert(size <= 8, "%d bytes vectors are not supported", size);
 return size > 8;
 }
 #ifdef COMPILER2
@@ -1631,7 +1630,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
 } else if (iid == vmIntrinsics::_invokeBasic) {
 has_receiver = true;
 } else {
-fatal(err_msg_res("unexpected intrinsic id %d", iid));
+fatal("unexpected intrinsic id %d", iid);
 }

 if (member_reg != noreg) {
@ -389,7 +389,7 @@ class Assembler : public AbstractAssembler {
|
||||
|
||||
static void assert_signed_range(intptr_t x, int nbits) {
|
||||
assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1)),
|
||||
err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits));
|
||||
"value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits);
|
||||
}
|
||||
|
||||
static void assert_signed_word_disp_range(intptr_t x, int nbits) {
|
||||
|
@ -1596,7 +1596,7 @@ void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
|
||||
else {
|
||||
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
|
||||
}
|
||||
assert(false, err_msg("DEBUG MESSAGE: %s", msg));
|
||||
assert(false, "DEBUG MESSAGE: %s", msg);
|
||||
}
|
||||
|
||||
|
||||
|
@ -56,7 +56,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_
|
||||
|
||||
#ifdef ASSERT
|
||||
static int check_nonzero(const char* xname, int x) {
|
||||
assert(x != 0, err_msg("%s should be nonzero", xname));
|
||||
assert(x != 0, "%s should be nonzero", xname);
|
||||
return x;
|
||||
}
|
||||
#define NONZERO(x) check_nonzero(#x, x)
|
||||
@ -453,7 +453,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
}
|
||||
|
||||
default:
|
||||
fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
|
||||
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -316,7 +316,7 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
|
||||
// 8 bytes FP registers are saved by default on SPARC.
|
||||
bool SharedRuntime::is_wide_vector(int size) {
|
||||
// Note, MaxVectorSize == 8 on SPARC.
|
||||
assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
|
||||
assert(size <= 8, "%d bytes vectors are not supported", size);
|
||||
return size > 8;
|
||||
}
|
||||
|
||||
@ -464,7 +464,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
|
||||
break;
|
||||
|
||||
default:
|
||||
fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
|
||||
fatal("unknown basic type %d", sig_bt[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1859,7 +1859,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
|
||||
} else if (iid == vmIntrinsics::_invokeBasic) {
|
||||
has_receiver = true;
|
||||
} else {
|
||||
fatal(err_msg_res("unexpected intrinsic id %d", iid));
|
||||
fatal("unexpected intrinsic id %d", iid);
|
||||
}
|
||||
|
||||
if (member_reg != noreg) {
|
||||
|
@ -1098,7 +1098,7 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
|
||||
Register r = as_Register(ra_->get_encode(this));
|
||||
CodeSection* consts_section = __ code()->consts();
|
||||
int consts_size = consts_section->align_at_start(consts_section->size());
|
||||
assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
|
||||
assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);
|
||||
|
||||
if (UseRDPCForConstantTableBase) {
|
||||
// For the following RDPC logic to work correctly the consts
|
||||
|
@ -733,11 +733,11 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
|
||||
// these asserts are somewhat nonsensical
|
||||
#ifndef _LP64
|
||||
assert(which == imm_operand || which == disp32_operand,
|
||||
err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
|
||||
"which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
|
||||
#else
|
||||
assert((which == call32_operand || which == imm_operand) && is_64bit ||
|
||||
which == narrow_oop_operand && !is_64bit,
|
||||
err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip)));
|
||||
"which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
|
||||
#endif // _LP64
|
||||
return ip;
|
||||
|
||||
|
@ -2684,7 +2684,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
|
||||
#endif // _LP64
|
||||
}
|
||||
} else {
|
||||
fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
|
||||
fatal("unexpected type: %s", basictype_to_str(c->type()));
|
||||
}
|
||||
// cpu register - address
|
||||
} else if (opr2->is_address()) {
|
||||
|
@ -417,7 +417,7 @@ void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rd
|
||||
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
|
||||
}
|
||||
// Don't assert holding the ttyLock
|
||||
assert(false, err_msg("DEBUG MESSAGE: %s", msg));
|
||||
assert(false, "DEBUG MESSAGE: %s", msg);
|
||||
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
|
||||
}
|
||||
|
||||
@ -883,7 +883,7 @@ void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
|
||||
ttyLocker ttyl;
|
||||
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
|
||||
msg);
|
||||
assert(false, err_msg("DEBUG MESSAGE: %s", msg));
|
||||
assert(false, "DEBUG MESSAGE: %s", msg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -53,7 +53,7 @@ void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_
|
||||
|
||||
#ifdef ASSERT
|
||||
static int check_nonzero(const char* xname, int x) {
|
||||
assert(x != 0, err_msg("%s should be nonzero", xname));
|
||||
assert(x != 0, "%s should be nonzero", xname);
|
||||
return x;
|
||||
}
|
||||
#define NONZERO(x) check_nonzero(#x, x)
|
||||
@ -456,7 +456,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
}
|
||||
|
||||
default:
|
||||
fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
|
||||
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -162,7 +162,7 @@ class XMMRegisterImpl: public AbstractRegisterImpl {
|
||||
XMMRegister successor() const { return as_XMMRegister(encoding() + 1); }
|
||||
|
||||
// accessors
|
||||
int encoding() const { assert(is_valid(), err_msg("invalid register (%d)", (int)(intptr_t)this )); return (intptr_t)this; }
|
||||
int encoding() const { assert(is_valid(), "invalid register (%d)", (int)(intptr_t)this ); return (intptr_t)this; }
|
||||
bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; }
|
||||
const char* name() const;
|
||||
};
|
||||
@ -245,7 +245,7 @@ public:
|
||||
KRegister successor() const { return as_KRegister(encoding() + 1); }
|
||||
|
||||
// accessors
|
||||
int encoding() const { assert(is_valid(), err_msg("invalid register (%d)", (int)(intptr_t)this)); return (intptr_t)this; }
|
||||
int encoding() const { assert(is_valid(), "invalid register (%d)", (int)(intptr_t)this); return (intptr_t)this; }
|
||||
bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; }
|
||||
const char* name() const;
|
||||
};
|
||||
|
@ -1434,7 +1434,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
|
||||
} else if (iid == vmIntrinsics::_invokeBasic) {
|
||||
has_receiver = true;
|
||||
} else {
|
||||
fatal(err_msg_res("unexpected intrinsic id %d", iid));
|
||||
fatal("unexpected intrinsic id %d", iid);
|
||||
}
|
||||
|
||||
if (member_reg != noreg) {
|
||||
|
@ -1695,7 +1695,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
|
||||
} else if (iid == vmIntrinsics::_invokeBasic) {
|
||||
has_receiver = true;
|
||||
} else {
|
||||
fatal(err_msg_res("unexpected intrinsic id %d", iid));
|
||||
fatal("unexpected intrinsic id %d", iid);
|
||||
}
|
||||
|
||||
if (member_reg != noreg) {
|
||||
|
@ -2286,7 +2286,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
|
||||
if (!pd_commit_memory(addr, size, exec)) {
|
||||
// Add extra info in product mode for vm_exit_out_of_memory():
|
||||
PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3120,8 +3120,8 @@ void os::Aix::set_signal_handler(int sig, bool set_installed) {
|
||||
// libjsig also interposes the sigaction() call below and saves the
|
||||
// old sigaction on it own.
|
||||
} else {
|
||||
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig));
|
||||
fatal("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3699,7 +3699,7 @@ void os::make_polling_page_unreadable(void) {
|
||||
void os::make_polling_page_readable(void) {
|
||||
// Changed according to os_linux.cpp.
|
||||
if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
|
||||
fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
|
||||
fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -117,8 +117,7 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
|
||||
return;
|
||||
}
|
||||
|
||||
VMError err(NULL, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(NULL, sig, pc, info, ucVoid);
|
||||
}
|
||||
|
||||
void VMError::reset_signal_handlers() {
|
||||
|
@ -1977,7 +1977,7 @@ static const char* sem_init_strerror(kern_return_t value) {
|
||||
OSXSemaphore::OSXSemaphore(uint value) {
|
||||
kern_return_t ret = SEM_INIT(_semaphore, value);
|
||||
|
||||
guarantee(ret == KERN_SUCCESS, err_msg("Failed to create semaphore: %s", sem_init_strerror(ret)));
|
||||
guarantee(ret == KERN_SUCCESS, "Failed to create semaphore: %s", sem_init_strerror(ret));
|
||||
}
|
||||
|
||||
OSXSemaphore::~OSXSemaphore() {
|
||||
@ -2213,7 +2213,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
|
||||
if (!pd_commit_memory(addr, size, exec)) {
|
||||
// add extra info in product mode for vm_exit_out_of_memory():
|
||||
PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3100,8 +3100,8 @@ void os::Bsd::set_signal_handler(int sig, bool set_installed) {
|
||||
// libjsig also interposes the sigaction() call below and saves the
|
||||
// old sigaction on it own.
|
||||
} else {
|
||||
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig));
|
||||
fatal("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3459,8 +3459,7 @@ void os::init(void) {
|
||||
|
||||
Bsd::set_page_size(getpagesize());
|
||||
if (Bsd::page_size() == -1) {
|
||||
fatal(err_msg("os_bsd.cpp: os::init: sysconf failed (%s)",
|
||||
strerror(errno)));
|
||||
fatal("os_bsd.cpp: os::init: sysconf failed (%s)", strerror(errno));
|
||||
}
|
||||
init_page_sizes((size_t) Bsd::page_size());
|
||||
|
||||
|
@ -121,8 +121,7 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
|
||||
return;
|
||||
}
|
||||
|
||||
VMError err(NULL, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(NULL, sig, pc, info, ucVoid);
|
||||
}
|
||||
|
||||
void VMError::reset_signal_handlers() {
|
||||
|
@ -2680,7 +2680,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
|
||||
if (err != 0) {
|
||||
// the caller wants all commit errors to exit with the specified mesg:
|
||||
warn_fail_commit_memory(addr, size, exec, err);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2716,7 +2716,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
|
||||
if (err != 0) {
|
||||
// the caller wants all commit errors to exit with the specified mesg:
|
||||
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4278,8 +4278,8 @@ void os::Linux::set_signal_handler(int sig, bool set_installed) {
|
||||
// libjsig also interposes the sigaction() call below and saves the
|
||||
// old sigaction on it own.
|
||||
} else {
|
||||
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig));
|
||||
fatal("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4611,8 +4611,8 @@ void os::init(void) {
|
||||
|
||||
Linux::set_page_size(sysconf(_SC_PAGESIZE));
|
||||
if (Linux::page_size() == -1) {
|
||||
fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
|
||||
strerror(errno)));
|
||||
fatal("os_linux.cpp: os::init: sysconf failed (%s)",
|
||||
strerror(errno));
|
||||
}
|
||||
init_page_sizes((size_t) Linux::page_size());
|
||||
|
||||
@ -4628,7 +4628,7 @@ void os::init(void) {
|
||||
int status;
|
||||
pthread_condattr_t* _condattr = os::Linux::condAttr();
|
||||
if ((status = pthread_condattr_init(_condattr)) != 0) {
|
||||
fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
|
||||
fatal("pthread_condattr_init: %s", strerror(status));
|
||||
}
|
||||
// Only set the clock if CLOCK_MONOTONIC is available
|
||||
if (os::supports_monotonic_clock()) {
|
||||
@ -4637,7 +4637,7 @@ void os::init(void) {
|
||||
warning("Unable to use monotonic clock with relative timed-waits" \
|
||||
" - changes to the time-of-day clock may have adverse affects");
|
||||
} else {
|
||||
fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
|
||||
fatal("pthread_condattr_setclock: %s", strerror(status));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -121,8 +121,7 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
|
||||
return;
|
||||
}
|
||||
|
||||
VMError err(NULL, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(NULL, sig, pc, info, ucVoid);
|
||||
}
|
||||
|
||||
void VMError::reset_signal_handlers() {
|
||||
|
@ -1027,10 +1027,10 @@ void os::WatcherThreadCrashProtection::check_crash_protection(int sig,
|
||||
}
|
||||
}
|
||||
|
||||
#define check_with_errno(check_type, cond, msg) \
|
||||
do { \
|
||||
int err = errno; \
|
||||
check_type(cond, err_msg("%s; error='%s' (errno=%d)", msg, strerror(err), err)); \
|
||||
#define check_with_errno(check_type, cond, msg) \
|
||||
do { \
|
||||
int err = errno; \
|
||||
check_type(cond, "%s; error='%s' (errno=%d)", msg, strerror(err), err); \
|
||||
} while (false)
|
||||
|
||||
#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
|
||||
|
@ -1118,8 +1118,7 @@ sigset_t* os::Solaris::allowdebug_blocked_signals() {
|
||||
|
||||
|
||||
void _handle_uncaught_cxx_exception() {
|
||||
VMError err("An uncaught C++ exception");
|
||||
err.report_and_die();
|
||||
VMError::report_and_die("An uncaught C++ exception");
|
||||
}
|
||||
|
||||
|
||||
@ -1330,7 +1329,7 @@ jlong getTimeMillis() {
|
||||
jlong os::javaTimeMillis() {
|
||||
timeval t;
|
||||
if (gettimeofday(&t, NULL) == -1) {
|
||||
fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
|
||||
fatal("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
|
||||
}
|
||||
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
|
||||
}
|
||||
@ -1338,7 +1337,7 @@ jlong os::javaTimeMillis() {
|
||||
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
|
||||
timeval t;
|
||||
if (gettimeofday(&t, NULL) == -1) {
|
||||
fatal(err_msg("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)));
|
||||
fatal("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno));
|
||||
}
|
||||
seconds = jlong(t.tv_sec);
|
||||
nanos = jlong(t.tv_usec) * 1000;
|
||||
@ -2392,14 +2391,14 @@ void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
|
||||
if (err != 0) {
|
||||
// the caller wants all commit errors to exit with the specified mesg:
|
||||
warn_fail_commit_memory(addr, bytes, exec, err);
|
||||
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
size_t os::Solaris::page_size_for_alignment(size_t alignment) {
|
||||
assert(is_size_aligned(alignment, (size_t) vm_page_size()),
|
||||
err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
|
||||
alignment, (size_t) vm_page_size()));
|
||||
SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
|
||||
alignment, (size_t) vm_page_size());
|
||||
|
||||
for (int i = 0; _page_sizes[i] != 0; i++) {
|
||||
if (is_size_aligned(alignment, _page_sizes[i])) {
|
||||
@ -2415,7 +2414,7 @@ int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
|
||||
int err = Solaris::commit_memory_impl(addr, bytes, exec);
|
||||
if (err == 0 && UseLargePages && alignment_hint > 0) {
|
||||
assert(is_size_aligned(bytes, alignment_hint),
|
||||
err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
|
||||
SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);
|
||||
|
||||
// The syscall memcntl requires an exact page size (see man memcntl for details).
|
||||
size_t page_size = page_size_for_alignment(alignment_hint);
|
||||
@ -2439,7 +2438,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
|
||||
if (err != 0) {
|
||||
// the caller wants all commit errors to exit with the specified mesg:
|
||||
warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
|
||||
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2969,11 +2968,11 @@ bool os::Solaris::is_valid_page_size(size_t bytes) {
|
||||
}
|
||||
|
||||
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
|
||||
assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
|
||||
assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
|
||||
assert(is_ptr_aligned((void*) start, align),
|
||||
err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
|
||||
PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
|
||||
assert(is_size_aligned(bytes, align),
|
||||
err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
|
||||
SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);
|
||||
|
||||
// Signal to OS that we want large pages for addresses
|
||||
// from addr, addr + bytes
|
||||
@ -3956,8 +3955,8 @@ void os::Solaris::set_signal_handler(int sig, bool set_installed,
|
||||
// libjsig also interposes the sigaction() call below and saves the
|
||||
// old sigaction on it own.
|
||||
} else {
|
||||
fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig));
|
||||
fatal("Encountered unexpected pre-existing sigaction handler "
|
||||
"%#lx for signal %d.", (long)oldhand, sig);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4403,8 +4402,7 @@ void os::init(void) {
|
||||
|
||||
page_size = sysconf(_SC_PAGESIZE);
|
||||
if (page_size == -1) {
|
||||
fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
|
||||
strerror(errno)));
|
||||
fatal("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
|
||||
}
|
||||
init_page_sizes((size_t) page_size);
|
||||
|
||||
@ -4416,7 +4414,7 @@ void os::init(void) {
|
||||
|
||||
int fd = ::open("/dev/zero", O_RDWR);
|
||||
if (fd < 0) {
|
||||
fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
|
||||
fatal("os::init: cannot open /dev/zero (%s)", strerror(errno));
|
||||
} else {
|
||||
Solaris::set_dev_zero_fd(fd);
|
||||
|
||||
|
@ -48,8 +48,8 @@ ThreadCritical::ThreadCritical() {
|
||||
thread_t owner = thr_self();
|
||||
if (global_mut_owner != owner) {
|
||||
if (os::Solaris::mutex_lock(&global_mut))
|
||||
fatal(err_msg("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
|
||||
strerror(errno)));
|
||||
fatal("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
|
||||
strerror(errno));
|
||||
assert(global_mut_count == 0, "must have clean count");
|
||||
assert(global_mut_owner == -1, "must have clean owner");
|
||||
}
|
||||
@ -68,8 +68,7 @@ ThreadCritical::~ThreadCritical() {
|
||||
if (global_mut_count == 0) {
|
||||
global_mut_owner = -1;
|
||||
if (os::Solaris::mutex_unlock(&global_mut))
|
||||
fatal(err_msg("ThreadCritical::~ThreadCritical: mutex_unlock failed "
|
||||
"(%s)", strerror(errno)));
|
||||
fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
|
||||
}
|
||||
} else {
|
||||
assert (Threads::number_of_threads() == 0, "valid only during initialization");
|
||||
|
@ -117,8 +117,7 @@ static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
|
||||
return;
|
||||
}
|
||||
|
||||
VMError err(NULL, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(NULL, sig, pc, info, ucVoid);
|
||||
}
|
||||
|
||||
void VMError::reset_signal_handlers() {
|
||||
|
@ -823,7 +823,7 @@ jlong offset() {
|
||||
java_origin.wMilliseconds = 0;
|
||||
FILETIME jot;
|
||||
if (!SystemTimeToFileTime(&java_origin, &jot)) {
|
||||
fatal(err_msg("Error = %d\nWindows error", GetLastError()));
|
||||
fatal("Error = %d\nWindows error", GetLastError());
|
||||
}
|
||||
_calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
|
||||
_has_calculated_offset = 1;
|
||||
@ -1936,7 +1936,7 @@ int os::get_last_error() {
|
||||
WindowsSemaphore::WindowsSemaphore(uint value) {
|
||||
_semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
|
||||
|
||||
guarantee(_semaphore != NULL, err_msg("CreateSemaphore failed with error code: %lu", GetLastError()));
|
||||
guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
|
||||
}
|
||||
|
||||
WindowsSemaphore::~WindowsSemaphore() {
|
||||
@ -1947,14 +1947,14 @@ void WindowsSemaphore::signal(uint count) {
|
||||
if (count > 0) {
|
||||
BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
|
||||
|
||||
assert(ret != 0, err_msg("ReleaseSemaphore failed with error code: %lu", GetLastError()));
|
||||
assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
|
||||
}
|
||||
}
|
||||
|
||||
void WindowsSemaphore::wait() {
|
||||
DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
|
||||
assert(ret != WAIT_FAILED, err_msg("WaitForSingleObject failed with error code: %lu", GetLastError()));
|
||||
assert(ret == WAIT_OBJECT_0, err_msg("WaitForSingleObject failed with return value: %lu", ret));
|
||||
assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
|
||||
assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
|
||||
}
|
||||
|
||||
// sun.misc.Signal
|
||||
@ -2344,8 +2344,7 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
|
||||
|
||||
static inline void report_error(Thread* t, DWORD exception_code,
|
||||
address addr, void* siginfo, void* context) {
|
||||
VMError err(t, exception_code, addr, siginfo, context);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, exception_code, addr, siginfo, context);
|
||||
|
||||
// If UseOsErrorReporting, this will return here and save the error file
|
||||
// somewhere where we can find it in the minidump.
|
||||
@ -3325,7 +3324,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
|
||||
assert(mesg != NULL, "mesg must be specified");
|
||||
if (!pd_commit_memory(addr, size, exec)) {
|
||||
warn_fail_commit_memory(addr, size, exec);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -5259,7 +5258,7 @@ bool os::check_heap(bool force) {
|
||||
}
|
||||
DWORD err = GetLastError();
|
||||
if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
|
||||
fatal(err_msg("heap walk aborted with error %d", err));
|
||||
fatal("heap walk aborted with error %d", err);
|
||||
}
|
||||
HeapUnlock(heap);
|
||||
}
|
||||
@ -5978,8 +5977,8 @@ void TestReserveMemorySpecial_test() {
|
||||
os::release_memory_special(actual_location, expected_allocation_size);
|
||||
// only now check, after releasing any memory to avoid any leaks.
|
||||
assert(actual_location == expected_location,
|
||||
err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
|
||||
expected_location, expected_allocation_size, actual_location));
|
||||
"Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
|
||||
expected_location, expected_allocation_size, actual_location);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -70,9 +70,8 @@ address VMError::get_resetted_sighandler(int sig) {
|
||||
|
||||
LONG WINAPI crash_handler(struct _EXCEPTION_POINTERS* exceptionInfo) {
|
||||
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
|
||||
VMError err(NULL, exception_code, NULL,
|
||||
exceptionInfo->ExceptionRecord, exceptionInfo->ContextRecord);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(NULL, exception_code, NULL, exceptionInfo->ExceptionRecord,
|
||||
exceptionInfo->ContextRecord);
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
|
||||
|
@ -468,8 +468,7 @@ report_and_die:
|
||||
sigaddset(&newset, sig);
|
||||
sigthreadmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return 0;
|
||||
|
@ -731,8 +731,7 @@ JVM_handle_bsd_signal(int sig,
|
||||
sigaddset(&newset, sig);
|
||||
sigprocmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return false;
|
||||
@ -865,7 +864,7 @@ static void current_stack_region(address * bottom, size_t * size) {
|
||||
int rslt = pthread_stackseg_np(pthread_self(), &ss);
|
||||
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_stackseg_np failed with err = %d", rslt));
|
||||
fatal("pthread_stackseg_np failed with err = %d", rslt);
|
||||
|
||||
*bottom = (address)((char *)ss.ss_sp - ss.ss_size);
|
||||
*size = ss.ss_size;
|
||||
@ -876,12 +875,12 @@ static void current_stack_region(address * bottom, size_t * size) {
|
||||
|
||||
// JVM needs to know exact stack location, abort if it fails
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_attr_init failed with err = %d", rslt));
|
||||
fatal("pthread_attr_init failed with err = %d", rslt);
|
||||
|
||||
rslt = pthread_attr_get_np(pthread_self(), &attr);
|
||||
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_attr_get_np failed with err = %d", rslt));
|
||||
fatal("pthread_attr_get_np failed with err = %d", rslt);
|
||||
|
||||
if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
|
||||
pthread_attr_getstacksize(&attr, size) != 0) {
|
||||
|
@ -320,8 +320,7 @@ static void current_stack_region(address *bottom, size_t *size) {
|
||||
int rslt = pthread_stackseg_np(pthread_self(), &ss);
|
||||
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_stackseg_np failed with err = " INT32_FORMAT,
|
||||
rslt));
|
||||
fatal("pthread_stackseg_np failed with err = " INT32_FORMAT, rslt);
|
||||
|
||||
stack_top = (address) ss.ss_sp;
|
||||
stack_bytes = ss.ss_size;
|
||||
@ -333,13 +332,12 @@ static void current_stack_region(address *bottom, size_t *size) {
|
||||
|
||||
// JVM needs to know exact stack location, abort if it fails
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_attr_init failed with err = " INT32_FORMAT, rslt));
|
||||
fatal("pthread_attr_init failed with err = " INT32_FORMAT, rslt);
|
||||
|
||||
rslt = pthread_attr_get_np(pthread_self(), &attr);
|
||||
|
||||
if (rslt != 0)
|
||||
fatal(err_msg("pthread_attr_get_np failed with err = " INT32_FORMAT,
|
||||
rslt));
|
||||
fatal("pthread_attr_get_np failed with err = " INT32_FORMAT, rslt);
|
||||
|
||||
if (pthread_attr_getstackaddr(&attr, (void **) &stack_bottom) != 0 ||
|
||||
pthread_attr_getstacksize(&attr, &stack_bytes) != 0) {
|
||||
|
@ -464,8 +464,7 @@ JVM_handle_linux_signal(int sig,
|
||||
sigaddset(&newset, sig);
|
||||
sigprocmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return true; // Mute compiler
|
||||
@ -558,7 +557,7 @@ static void current_stack_region(address * bottom, size_t * size) {
|
||||
if (rslt == ENOMEM) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
|
||||
} else {
|
||||
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
|
||||
fatal("pthread_getattr_np failed with errno = %d", rslt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -439,8 +439,7 @@ report_and_die:
|
||||
sigaddset(&newset, sig);
|
||||
sigprocmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return false;
|
||||
@ -531,7 +530,7 @@ static void current_stack_region(address * bottom, size_t * size) {
|
||||
if (rslt == ENOMEM) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
|
||||
} else {
|
||||
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
|
||||
fatal("pthread_getattr_np failed with errno = %d", rslt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,7 +172,7 @@ static void current_stack_region(address* bottom, size_t* size) {
|
||||
if (rslt == ENOMEM) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
|
||||
} else {
|
||||
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
|
||||
fatal("pthread_getattr_np failed with errno = %d", rslt);
|
||||
}
|
||||
}
|
||||
|
||||
@ -692,8 +692,7 @@ JVM_handle_linux_signal(int sig,
|
||||
sigaddset(&newset, sig);
|
||||
sigprocmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
@ -542,8 +542,7 @@ JVM_handle_linux_signal(int sig,
|
||||
sigaddset(&newset, sig);
|
||||
sigprocmask(SIG_UNBLOCK, &newset, NULL);
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return true; // Mute compiler
|
||||
@ -689,7 +688,7 @@ static void current_stack_region(address * bottom, size_t * size) {
|
||||
if (rslt == ENOMEM) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
|
||||
} else {
|
||||
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
|
||||
fatal("pthread_getattr_np failed with errno = %d", rslt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -328,7 +328,7 @@ static void current_stack_region(address *bottom, size_t *size) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
|
||||
}
|
||||
else {
|
||||
fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
|
||||
fatal("pthread_getattr_np failed with errno = %d", res);
|
||||
}
|
||||
}
|
||||
|
||||
@ -336,7 +336,7 @@ static void current_stack_region(address *bottom, size_t *size) {
|
||||
size_t stack_bytes;
|
||||
res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
|
||||
if (res != 0) {
|
||||
fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
|
||||
fatal("pthread_attr_getstack failed with errno = %d", res);
|
||||
}
|
||||
address stack_top = stack_bottom + stack_bytes;
|
||||
|
||||
@ -348,7 +348,7 @@ static void current_stack_region(address *bottom, size_t *size) {
|
||||
size_t guard_bytes;
|
||||
res = pthread_attr_getguardsize(&attr, &guard_bytes);
|
||||
if (res != 0) {
|
||||
fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
|
||||
fatal("pthread_attr_getguardsize failed with errno = %d", res);
|
||||
}
|
||||
int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
|
||||
assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
|
||||
|
@ -549,8 +549,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
|
||||
}
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
@ -707,8 +707,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
|
||||
}
|
||||
|
||||
VMError err(t, sig, pc, info, ucVoid);
|
||||
err.report_and_die();
|
||||
VMError::report_and_die(t, sig, pc, info, ucVoid);
|
||||
|
||||
ShouldNotReachHere();
|
||||
return false;
|
||||
|
@ -43,8 +43,7 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
|
||||
CodeSection* cs = code->insts();
|
||||
cs->clear_mark(); // new assembler kills old mark
|
||||
if (cs->start() == NULL) {
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s",
|
||||
code->name()));
|
||||
vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "CodeCache: no room for %s", code->name());
|
||||
}
|
||||
_code_section = cs;
|
||||
_oop_recorder= code->oop_recorder();
|
||||
|
@ -173,7 +173,7 @@ class CodeSection VALUE_OBJ_CLASS_SPEC {
|
||||
bool allocates(address pc) const { return pc >= _start && pc < _limit; }
|
||||
bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }
|
||||
|
||||
void set_end(address pc) { assert(allocates2(pc), err_msg("not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit))); _end = pc; }
|
||||
void set_end(address pc) { assert(allocates2(pc), "not in CodeBuffer memory: " INTPTR_FORMAT " <= " INTPTR_FORMAT " <= " INTPTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit)); _end = pc; }
|
||||
void set_mark(address pc) { assert(contains2(pc), "not in codeBuffer");
|
||||
_mark = pc; }
|
||||
void set_mark_off(int offset) { assert(contains2(offset+_start),"not in codeBuffer");
|
||||
|
@ -121,8 +121,7 @@ inline void assert_different_registers(
|
||||
) {
|
||||
assert(
|
||||
a != b,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT "", p2i(a), p2i(b)
|
||||
);
|
||||
}
|
||||
|
||||
@ -135,9 +134,9 @@ inline void assert_different_registers(
|
||||
assert(
|
||||
a != b && a != c
|
||||
&& b != c,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c)
|
||||
);
|
||||
}
|
||||
|
||||
@ -152,9 +151,9 @@ inline void assert_different_registers(
|
||||
a != b && a != c && a != d
|
||||
&& b != c && b != d
|
||||
&& c != d,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d)
|
||||
);
|
||||
}
|
||||
|
||||
@ -171,9 +170,9 @@ inline void assert_different_registers(
|
||||
&& b != c && b != d && b != e
|
||||
&& c != d && c != e
|
||||
&& d != e,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e)
|
||||
);
|
||||
}
|
||||
|
||||
@ -192,10 +191,10 @@ inline void assert_different_registers(
|
||||
&& c != d && c != e && c != f
|
||||
&& d != e && d != f
|
||||
&& e != f,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f)
|
||||
);
|
||||
}
|
||||
|
||||
@ -216,10 +215,10 @@ inline void assert_different_registers(
|
||||
&& d != e && d != f && d != g
|
||||
&& e != f && e != g
|
||||
&& f != g,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g)
|
||||
);
|
||||
}
|
||||
|
||||
@ -242,10 +241,10 @@ inline void assert_different_registers(
|
||||
&& e != f && e != g && e != h
|
||||
&& f != g && f != h
|
||||
&& g != h,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h)
|
||||
);
|
||||
}
|
||||
|
||||
@ -270,11 +269,11 @@ inline void assert_different_registers(
|
||||
&& f != g && f != h && f != i
|
||||
&& g != h && g != i
|
||||
&& h != i,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i)
|
||||
);
|
||||
}
|
||||
|
||||
@ -300,11 +299,11 @@ inline void assert_different_registers(
|
||||
&& g != h && g != i && g != j
|
||||
&& h != i && h != j
|
||||
&& i != j,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j)
|
||||
);
|
||||
}
|
||||
|
||||
@ -332,11 +331,11 @@ inline void assert_different_registers(
|
||||
&& h != i && h != j && h !=k
|
||||
&& i != j && i !=k
|
||||
&& j !=k,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k)
|
||||
);
|
||||
}
|
||||
|
||||
@ -366,12 +365,12 @@ inline void assert_different_registers(
|
||||
&& i != j && i !=k && i !=l
|
||||
&& j !=k && j !=l
|
||||
&& k !=l,
|
||||
err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT
|
||||
", l=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k), p2i(l))
|
||||
"registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
|
||||
", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
|
||||
", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
|
||||
", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT
|
||||
", l=" INTPTR_FORMAT "",
|
||||
p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k), p2i(l)
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -4020,7 +4020,7 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee) {
|
||||
break;
|
||||
|
||||
default:
|
||||
fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
|
||||
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
|
||||
break;
|
||||
}
|
||||
set_state(state_before);
|
||||
|
@ -462,7 +462,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
|
||||
vtable_call(op);
|
||||
break;
|
||||
default:
|
||||
fatal(err_msg_res("unexpected op code: %s", op->name()));
|
||||
fatal("unexpected op code: %s", op->name());
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -920,7 +920,7 @@ LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
|
||||
|
||||
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
|
||||
assert(type2size[t] == type2size[value->type()],
|
||||
err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
|
||||
"size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
|
||||
if (!value->is_register()) {
|
||||
// force into a register
|
||||
LIR_Opr r = new_register(value->type());
|
||||
@ -2832,7 +2832,7 @@ void LIRGenerator::do_OsrEntry(OsrEntry* x) {
|
||||
|
||||
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
|
||||
assert(args->length() == arg_list->length(),
|
||||
err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
|
||||
"args=%d, arg_list=%d", args->length(), arg_list->length());
|
||||
for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
|
||||
LIRItem* param = args->at(i);
|
||||
LIR_Opr loc = arg_list->at(i);
|
||||
@ -2976,7 +2976,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
|
||||
break;
|
||||
}
|
||||
default:
|
||||
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
|
||||
fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -66,8 +66,8 @@ ciKlass::ciKlass(ciSymbol* name, BasicType bt) : ciType(bt) {
|
||||
// ------------------------------------------------------------------
|
||||
// ciKlass::is_subtype_of
|
||||
bool ciKlass::is_subtype_of(ciKlass* that) {
|
||||
assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
|
||||
assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
|
||||
assert(this->is_loaded(), "must be loaded: %s", this->name()->as_quoted_ascii());
|
||||
assert(that->is_loaded(), "must be loaded: %s", that->name()->as_quoted_ascii());
|
||||
|
||||
// Check to see if the klasses are identical.
|
||||
if (this == that) {
|
||||
@ -85,8 +85,8 @@ bool ciKlass::is_subtype_of(ciKlass* that) {
|
||||
// ------------------------------------------------------------------
|
||||
// ciKlass::is_subclass_of
|
||||
bool ciKlass::is_subclass_of(ciKlass* that) {
|
||||
assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
|
||||
assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
|
||||
assert(this->is_loaded(), "must be loaded: %s", this->name()->as_quoted_ascii());
|
||||
assert(that->is_loaded(), "must be loaded: %s", that->name()->as_quoted_ascii());
|
||||
|
||||
VM_ENTRY_MARK;
|
||||
Klass* this_klass = get_Klass();
|
||||
|
@ -576,13 +576,13 @@ void ciCallProfile::add_receiver(ciKlass* receiver, int receiver_count) {
|
||||
|
||||
void ciMethod::assert_virtual_call_type_ok(int bci) {
|
||||
assert(java_code_at_bci(bci) == Bytecodes::_invokevirtual ||
|
||||
java_code_at_bci(bci) == Bytecodes::_invokeinterface, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
|
||||
java_code_at_bci(bci) == Bytecodes::_invokeinterface, "unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci)));
|
||||
}
|
||||
|
||||
void ciMethod::assert_call_type_ok(int bci) {
|
||||
assert(java_code_at_bci(bci) == Bytecodes::_invokestatic ||
|
||||
java_code_at_bci(bci) == Bytecodes::_invokespecial ||
|
||||
java_code_at_bci(bci) == Bytecodes::_invokedynamic, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
|
||||
java_code_at_bci(bci) == Bytecodes::_invokedynamic, "unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci)));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -122,7 +122,7 @@ void ciMethodData::load_extra_data() {
|
||||
// An empty slot or ArgInfoData entry marks the end of the trap data
|
||||
return;
|
||||
default:
|
||||
fatal(err_msg("bad tag = %d", dp_dst->tag()));
|
||||
fatal("bad tag = %d", dp_dst->tag());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -289,7 +289,7 @@ ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_f
|
||||
break;
|
||||
}
|
||||
default:
|
||||
fatal(err_msg("bad tag = %d", dp->tag()));
|
||||
fatal("bad tag = %d", dp->tag());
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
@ -578,7 +578,7 @@ void ciMethodData::dump_replay_data_extra_data_helper(outputStream* out, int rou
|
||||
break;
|
||||
}
|
||||
default:
|
||||
fatal(err_msg("bad tag = %d", dp->tag()));
|
||||
fatal("bad tag = %d", dp->tag());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -690,7 +690,7 @@ void ciMethodData::print_data_on(outputStream* st) {
|
||||
data = new ciSpeculativeTrapData(dp);
|
||||
break;
|
||||
default:
|
||||
fatal(err_msg("unexpected tag %d", dp->tag()));
|
||||
fatal("unexpected tag %d", dp->tag());
|
||||
}
|
||||
st->print("%d", dp_to_di(data->dp()));
|
||||
st->fill_to(6);
|
||||
|
@ -730,7 +730,7 @@ class CompileReplay : public StackObj {
|
||||
if (parsed_two_word == i) continue;
|
||||
|
||||
default:
|
||||
fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value()));
|
||||
fatal("Unexpected tag: %d", cp->tag_at(i).value());
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1964,7 +1964,7 @@ ciTypeFlow::ciTypeFlow(ciEnv* env, ciMethod* method, int osr_bci) {
|
||||
_has_irreducible_entry = false;
|
||||
_osr_bci = osr_bci;
|
||||
_failure_reason = NULL;
|
||||
assert(0 <= start_bci() && start_bci() < code_size() , err_msg("correct osr_bci argument: 0 <= %d < %d", start_bci(), code_size()));
|
||||
assert(0 <= start_bci() && start_bci() < code_size() , "correct osr_bci argument: 0 <= %d < %d", start_bci(), code_size());
|
||||
_work_list = NULL;
|
||||
|
||||
_ciblock_count = _methodBlocks->num_blocks();
|
||||
|

@ -262,10 +262,9 @@ void AltHashing::testMurmur3_32_ByteArray() {
juint final_hash = murmur3_32(hashes, 4*256);

assert (MURMUR3_32_X86_CHECK_VALUE == final_hash,
err_msg(
"Calculated hash result not as expected. Expected %08X got %08X\n",
MURMUR3_32_X86_CHECK_VALUE,
final_hash));
"Calculated hash result not as expected. Expected %08X got %08X\n",
MURMUR3_32_X86_CHECK_VALUE,
final_hash);
}

void AltHashing::testEquivalentHashes() {

@ -276,24 +275,24 @@ void AltHashing::testEquivalentHashes() {
jbytes = murmur3_32(TWO_BYTE, 2);
jchars = murmur3_32(ONE_CHAR, 1);
assert (jbytes == jchars,
err_msg("Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars));
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);

jbytes = murmur3_32(FOUR_BYTE, 4);
jchars = murmur3_32(TWO_CHAR, 2);
ints = murmur3_32(ONE_INT, 1);
assert ((jbytes == jchars) && (jbytes == ints),
err_msg("Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints));
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);

jbytes = murmur3_32(SIX_BYTE, 6);
jchars = murmur3_32(THREE_CHAR, 3);
assert (jbytes == jchars,
err_msg("Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars));
"Hashes did not match. b:%08x != c:%08x\n", jbytes, jchars);

jbytes = murmur3_32(EIGHT_BYTE, 8);
jchars = murmur3_32(FOUR_CHAR, 4);
ints = murmur3_32(TWO_INT, 2);
assert ((jbytes == jchars) && (jbytes == ints),
err_msg("Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints));
"Hashes did not match. b:%08x != c:%08x != i:%08x\n", jbytes, jchars, ints);
}

// Returns true if the alternate hashcode is correct

@ -319,12 +319,12 @@ PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void ClassFileParser::report_assert_property_failure(const char* msg, TRAPS) {
ResourceMark rm(THREAD);
fatal(err_msg(msg, _class_name->as_C_string()));
fatal(msg, _class_name->as_C_string());
}

void ClassFileParser::report_assert_property_failure(const char* msg, int index, TRAPS) {
ResourceMark rm(THREAD);
fatal(err_msg(msg, index, _class_name->as_C_string()));
fatal(msg, index, _class_name->as_C_string());
}
PRAGMA_DIAG_POP

@ -492,8 +492,7 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
break;
}
default:
fatal(err_msg("bad constant pool tag value %u",
cp->tag_at(index).value()));
fatal("bad constant pool tag value %u", cp->tag_at(index).value());
ShouldNotReachHere();
break;
} // end of switch

@ -913,7 +913,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
}

// Nothing more for the iterator to hand out.
assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
assert(head == NULL, "head is " PTR_FORMAT ", expected not null:", p2i(head));
return NULL;
}

@ -119,7 +119,7 @@ MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
allocated = new MetadataOnStackBuffer();
}

assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated)));
assert(!allocated->is_full(), "Should not be full: " PTR_FORMAT, p2i(allocated));

return allocated;
}

@ -312,12 +312,12 @@ void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
const int limit = the_table()->table_size();

assert(0 <= start_idx && start_idx <= limit,
err_msg("start_idx (%d) is out of bounds", start_idx));
"start_idx (%d) is out of bounds", start_idx);
assert(0 <= end_idx && end_idx <= limit,
err_msg("end_idx (%d) is out of bounds", end_idx));
"end_idx (%d) is out of bounds", end_idx);
assert(start_idx <= end_idx,
err_msg("Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx));
"Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx);

for (int i = start_idx; i < end_idx; i += 1) {
HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);

@ -335,12 +335,12 @@ void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClos
const int limit = the_table()->table_size();

assert(0 <= start_idx && start_idx <= limit,
err_msg("start_idx (%d) is out of bounds", start_idx));
"start_idx (%d) is out of bounds", start_idx);
assert(0 <= end_idx && end_idx <= limit,
err_msg("end_idx (%d) is out of bounds", end_idx));
"end_idx (%d) is out of bounds", end_idx);
assert(start_idx <= end_idx,
err_msg("Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx));
"Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx);

for (int i = start_idx; i < end_idx; ++i) {
HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);

@ -229,9 +229,9 @@ Klass* SystemDictionary::resolve_or_fail(Symbol* class_name,

Klass* SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
assert(!THREAD->is_Compiler_thread(),
err_msg("can not load classes with compiler thread: class=%s, classloader=%s",
class_name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string()));
"can not load classes with compiler thread: class=%s, classloader=%s",
class_name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
if (FieldType::is_array(class_name)) {
return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
} else if (FieldType::is_obj(class_name)) {

@ -2264,7 +2264,7 @@ methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid
assert(MethodHandles::is_signature_polymorphic(iid) &&
MethodHandles::is_signature_polymorphic_intrinsic(iid) &&
iid != vmIntrinsics::_invokeGeneric,
err_msg("must be a known MH intrinsic iid=%d: %s", iid, vmIntrinsics::name_at(iid)));
"must be a known MH intrinsic iid=%d: %s", iid, vmIntrinsics::name_at(iid));

unsigned int hash = invoke_method_table()->compute_hash(signature, iid);
int index = invoke_method_table()->hash_to_index(hash);

@ -2390,7 +2390,7 @@ static bool is_always_visible_class(oop mirror) {
if (klass->oop_is_typeArray()) {
return true; // primitive array
}
assert(klass->oop_is_instance(), klass->external_name());
assert(klass->oop_is_instance(), "%s", klass->external_name());
return klass->is_public() &&
(InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::Object_klass()) || // java.lang
InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass())); // java.lang.invoke

@ -2443,7 +2443,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
mirror = ss.as_java_mirror(class_loader, protection_domain,
SignatureStream::NCDFError, CHECK_(empty));
}
assert(!oopDesc::is_null(mirror), ss.as_symbol(THREAD)->as_C_string());
assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
if (ss.at_return_type())
rt = Handle(THREAD, mirror);
else

@ -365,7 +365,7 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
if (size <= 0) {
return NULL;
}

@ -817,7 +817,7 @@ double CodeCache::reverse_free_ratio(int code_blob_type) {
double max_capacity = (double)heap->max_capacity();
double result = max_capacity / unallocated_capacity;
assert (max_capacity >= unallocated_capacity, "Must be");
assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
return result;
}

@ -400,7 +400,7 @@ int Dependencies::dep_args(Dependencies::DepType dept) {
}

void Dependencies::check_valid_dependency_type(DepType dept) {
guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, err_msg("invalid dependency type: %d", (int) dept));
guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, "invalid dependency type: %d", (int) dept);
}

// for the sake of the compiler log, print out current dependencies:

@ -225,6 +225,6 @@ void ImplicitExceptionTable::verify(nmethod *nm) const {
for (uint i = 0; i < len(); i++) {
if ((*adr(i) > (unsigned int)nm->insts_size()) ||
(*(adr(i)+1) > (unsigned int)nm->insts_size()))
fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data));
fatal("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data);
}
}

@ -1709,7 +1709,7 @@ static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address add
// Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
ic->set_to_clean();
assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));
assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string());
}
}

@ -2540,7 +2540,7 @@ void nmethod::verify() {
ResourceMark rm;

if (!CodeCache::contains(this)) {
fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
fatal("nmethod at " INTPTR_FORMAT " not in zone", this);
}

if(is_native_method() )

@ -2548,8 +2548,7 @@ void nmethod::verify() {

nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
if (nm != this) {
fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
this));
fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", this);
}

for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {

@ -67,7 +67,7 @@ StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
intptr_t size = round_to(buffer_size, 2*BytesPerWord);
BufferBlob* blob = BufferBlob::create(name, size);
if( blob == NULL) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name));
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
}
_stub_interface = stub_interface;
_buffer_size = blob->content_size();

@ -222,9 +222,9 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int
InstanceKlass* ik = InstanceKlass::cast(klass);
klassVtable* vt = ik->vtable();
ik->print();
fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
"index %d (vtable length %d)",
(address)receiver, index, vt->length()));
fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
"index %d (vtable length %d)",
(address)receiver, index, vt->length());
}

#endif // Product
#endif // PRODUCT

@ -1669,8 +1669,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
}
if (PrintCompilation) {
FormatBufferResource msg = retry_message != NULL ?
err_msg_res("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) :
err_msg_res("COMPILE SKIPPED: %s", ci_env.failure_reason());
FormatBufferResource("COMPILE SKIPPED: %s (%s)", ci_env.failure_reason(), retry_message) :
FormatBufferResource("COMPILE SKIPPED: %s", ci_env.failure_reason());
task->print(tty, msg);
}
} else {

@ -133,17 +133,17 @@ void AdaptiveFreeList<Chunk>::verify_stats() const {
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
"FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count());
}
#endif

@ -105,9 +105,9 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
- split_deaths() - coal_deaths();
assert(demand >= 0,
err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
PTR_FORMAT " (size=" SIZE_FORMAT ")",
demand, p2i(this), count));
"Demand (" SSIZE_FORMAT ") should be non-negative for "
PTR_FORMAT " (size=" SIZE_FORMAT ")",
demand, p2i(this), count);
// Defensive: adjust for imprecision in event counting
if (demand < 0) {
demand = 0;

@ -1959,9 +1959,9 @@ void CompactibleFreeListSpace::save_marks() {
MemRegion ur = used_region();
MemRegion urasm = used_region_at_save_marks();
assert(ur.contains(urasm),
err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
" should contain [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
" should contain [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
#endif
// inform allocator that promotions should be tracked.
assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");

@ -2875,9 +2875,9 @@ FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, si
smallSplitBirth(rem);
}
assert(n * word_sz == fc->size(),
err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
fc->size(), n, word_sz));
"Chunk size " SIZE_FORMAT " is not exactly splittable by "
SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
fc->size(), n, word_sz);
return fc;
}

@ -3476,7 +3476,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
// been published), so we do not need to check for
// uninitialized objects before pushing here.
void Par_ConcMarkingClosure::do_oop(oop obj) {
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked

@ -6458,7 +6458,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
// Ignore mark word because we are running concurrent with mutators
assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
HeapWord* addr = (HeapWord*)p;
assert(_span.contains(addr), "we are scanning the CMS generation");
bool is_obj_array = false;

@ -6893,7 +6893,7 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
}

void PushAndMarkVerifyClosure::do_oop(oop obj) {
assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black

@ -6991,7 +6991,7 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {

void PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black

@ -7029,7 +7029,7 @@ void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p)

void Par_PushOrMarkClosure::do_oop(oop obj) {
// Ignore mark word because we are running concurrent with mutators.
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
// Oop lies in _span and isn't yet grey or black

@ -7106,7 +7106,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
// path and may be at the end of the global overflow list (so
// the mark word may be NULL).
assert(obj->is_oop_or_null(true /* ignore mark word */),
err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
"Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked

@ -7186,7 +7186,7 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
// the debugger, is_oop_or_null(false) may subsequently start
// to hold.
assert(obj->is_oop_or_null(true),
err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
"Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
HeapWord* addr = (HeapWord*)obj;
// Check if oop points into the CMS generation
// and is not marked

@ -7423,7 +7423,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
// coalesced chunk to the appropriate free list.
if (inFreeRange()) {
assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())));
"freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
flush_cur_free_chunk(freeFinger(),
pointer_delta(addr, freeFinger()));
if (CMSTraceSweeper) {

@ -7825,10 +7825,10 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
assert(inFreeRange(), "Should only be called if currently in a free range.");
HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
assert(_sp->used_region().contains(eob - 1),
err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
" out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
"eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
" out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
if (eob >= _limit) {
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {

@ -296,8 +296,8 @@ class ChunkArray: public CHeapObj<mtGC> {

size_t end() {
assert(_index <= capacity(),
err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
_index, _capacity));
"_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
_index, _capacity);
return _index;
} // exclusive

@ -322,9 +322,9 @@ class ChunkArray: public CHeapObj<mtGC> {
} else {
++_overflows;
assert(_index == _capacity,
err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
"): out of bounds at overflow#" SIZE_FORMAT,
_index, _capacity, _overflows));
"_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
"): out of bounds at overflow#" SIZE_FORMAT,
_index, _capacity, _overflows);
}
}
};

@ -43,7 +43,7 @@ non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads));
"n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);

// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;

@ -370,18 +370,18 @@ process_chunk_boundaries(Space* sp,
- lowest_non_clean_base_chunk_index;
if (last_chunk_index_to_check > last_chunk_index) {
assert(last_block + last_block_size > used.end(),
err_msg("Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
" does not exceed used.end() = " PTR_FORMAT ","
" yet last_chunk_index_to_check " INTPTR_FORMAT
" exceeds last_chunk_index " INTPTR_FORMAT,
p2i(last_block), p2i(last_block + last_block_size),
p2i(used.end()),
last_chunk_index_to_check, last_chunk_index));
"Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
" does not exceed used.end() = " PTR_FORMAT ","
" yet last_chunk_index_to_check " INTPTR_FORMAT
" exceeds last_chunk_index " INTPTR_FORMAT,
p2i(last_block), p2i(last_block + last_block_size),
p2i(used.end()),
last_chunk_index_to_check, last_chunk_index);
assert(sp->used_region().end() > used.end(),
err_msg("Expansion did not happen: "
"[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(sp->used_region().start()), p2i(sp->used_region().end()),
p2i(used.start()), p2i(used.end())));
"Expansion did not happen: "
"[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(sp->used_region().start()), p2i(sp->used_region().end()),
p2i(used.start()), p2i(used.end()));
NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
last_chunk_index_to_check = last_chunk_index;
}

@ -73,7 +73,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
return res;
}
inline void setNext(PromotedObject* x) {

@ -153,10 +153,10 @@ class TestBufferingOopClosure {

boc.done();

#define assert_testCount(got, expected) \
assert((got) == (expected), \
err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
(got), (expected), num_narrow, num_full, do_oop_order))
#define assert_testCount(got, expected) \
assert((got) == (expected), \
"Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
(got), (expected), num_narrow, num_full, do_oop_order)

assert_testCount(num_narrow, coc.narrow_oop_count());
assert_testCount(num_full, coc.full_oop_count());

@ -190,11 +190,11 @@ class TestBufferingOopClosure {

fr.oops_do(&boc, 0);

#define assert_testIsBufferEmptyOrFull(got, expected) \
assert((got) == (expected), \
err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
(got), (expected), num_narrow, num_full, \
BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full)))
#define assert_testIsBufferEmptyOrFull(got, expected) \
assert((got) == (expected), \
"Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
(got), (expected), num_narrow, num_full, \
BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full))

assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty());
assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full());

@ -232,8 +232,8 @@ class TestBufferingOopClosure {
boc.done();

assert(boc.is_buffer_empty(),
err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
num_narrow, num_full));
"Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
num_narrow, num_full);
}

static void testEmptyAfterDone() {

@ -91,10 +91,8 @@ CollectionSetChooser::CollectionSetChooser() :

#ifndef PRODUCT
void CollectionSetChooser::verify() {
guarantee(_end <= regions_length(),
err_msg("_end: %u regions length: %u", _end, regions_length()));
guarantee(_front <= _end,
err_msg("_front: %u _end: %u", _front, _end));
guarantee(_end <= regions_length(), "_end: %u regions length: %u", _end, regions_length());
guarantee(_front <= _end, "_front: %u _end: %u", _front, _end);
uint index = 0;
size_t sum_of_reclaimable_bytes = 0;
while (index < _front) {

@ -108,19 +106,19 @@ void CollectionSetChooser::verify() {
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
guarantee(!curr->is_young(), "should not be young!");
guarantee(!curr->is_pinned(),
err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
"Pinned region should not be in collection set (index %u)", curr->hrm_index());
if (prev != NULL) {
guarantee(order_regions(prev, curr) != 1,
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
prev->gc_efficiency(), curr->gc_efficiency()));
"GC eff prev: %1.4f GC eff curr: %1.4f",
prev->gc_efficiency(), curr->gc_efficiency());
}
sum_of_reclaimable_bytes += curr->reclaimable_bytes();
prev = curr;
}
guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
err_msg("reclaimable bytes inconsistent, "
"remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
_remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
"reclaimable bytes inconsistent, "
"remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
_remaining_reclaimable_bytes, sum_of_reclaimable_bytes);
}
#endif // !PRODUCT

@ -151,7 +149,7 @@ void CollectionSetChooser::sort_regions() {

void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_pinned(),
err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
"Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
assert(!hr->is_young(), "should not be young!");
_regions.append(hr);
_end++;

@ -73,9 +73,7 @@ public:
HeapRegion* res = NULL;
if (_front < _end) {
res = regions_at(_front);
assert(res != NULL,
err_msg("Unexpected NULL hr in _regions at index %u",
_front));
assert(res != NULL, "Unexpected NULL hr in _regions at index %u", _front);
}
return res;
}

@ -88,9 +86,9 @@ public:
assert(_front < _end, "pre-condition");
regions_at_put(_front, NULL);
assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
err_msg("remaining reclaimable bytes inconsistent "
"from region: " SIZE_FORMAT " remaining: " SIZE_FORMAT,
hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
"remaining reclaimable bytes inconsistent "
"from region: " SIZE_FORMAT " remaining: " SIZE_FORMAT,
hr->reclaimable_bytes(), _remaining_reclaimable_bytes);
_remaining_reclaimable_bytes -= hr->reclaimable_bytes();
_front += 1;
return hr;

@ -399,7 +399,7 @@ void CMMarkStack::note_end_of_gc() {
// only check this once per GC anyway, so it won't be a performance
// issue in any way.
guarantee(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
"saved index: %d index: %d", _saved_index, _index);
_saved_index = -1;
}

@ -794,8 +794,8 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
// in a STW phase.
assert(!concurrent_marking_in_progress(), "invariant");
assert(out_of_regions(),
err_msg("only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
p2i(_finger), p2i(_heap_end)));
"only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
p2i(_finger), p2i(_heap_end));
}
}

@ -1416,9 +1416,9 @@ public:
HeapWord* start = hr->bottom();

assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
err_msg("Preconditions not met - "
"start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
p2i(start), p2i(ntams), p2i(hr->end())));
"Preconditions not met - "
"start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
p2i(start), p2i(ntams), p2i(hr->end()));

// Find the first marked object at or after "start".
start = _bm->getNextMarkedWordAddress(start, ntams);

@ -1718,11 +1718,11 @@ class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
}

assert(end_idx <= _card_bm->size(),
err_msg("oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
end_idx, _card_bm->size()));
"oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
end_idx, _card_bm->size());
assert(start_idx < _card_bm->size(),
err_msg("oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
start_idx, _card_bm->size()));
"oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
start_idx, _card_bm->size());

_cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
}

@ -2472,7 +2472,7 @@ private:
// object; it could instead have been a stale reference.
oop obj = static_cast<oop>(entry);
assert(obj->is_oop(true /* ignore mark word */),
err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj)));
"Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
_task->make_reference_grey(obj, hr);
}
}

@ -2589,9 +2589,9 @@ void ConcurrentMark::checkpointRootsFinalWork() {
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
guarantee(has_overflown() ||
satb_mq_set.completed_buffers_num() == 0,
err_msg("Invariant: has_overflown = %s, num buffers = %d",
BOOL_TO_STR(has_overflown()),
satb_mq_set.completed_buffers_num()));
"Invariant: has_overflown = %s, num buffers = %d",
BOOL_TO_STR(has_overflown()),
satb_mq_set.completed_buffers_num());

print_stats();
}

@ -2725,11 +2725,11 @@ public:

void operator()(oop obj) const {
guarantee(obj->is_oop(),
err_msg("Non-oop " PTR_FORMAT ", phase: %s, info: %d",
p2i(obj), _phase, _info));
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
p2i(obj), _phase, _info);
guarantee(!_g1h->obj_in_cs(obj),
err_msg("obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
p2i(obj), _phase, _info));
"obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
p2i(obj), _phase, _info);
}
};

@ -2762,8 +2762,8 @@ void ConcurrentMark::verify_no_cset_oops() {
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
err_msg("global finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
"global finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr));
}

// Verify the task fingers

@ -2776,8 +2776,8 @@ void ConcurrentMark::verify_no_cset_oops() {
HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
err_msg("task finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
"task finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
}
}
}

@ -2817,10 +2817,10 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
HeapWord* end = hr->end();

assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
err_msg("Preconditions not met - "
"start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
"top: " PTR_FORMAT ", end: " PTR_FORMAT,
p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
"Preconditions not met - "
"start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
"top: " PTR_FORMAT ", end: " PTR_FORMAT,
p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));

assert(hr->next_marked_bytes() == 0, "Precondition");

@ -197,9 +197,9 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
"outside underlying space?"); \
assert(G1CollectedHeap::heap()->is_in_exact(addr), \
err_msg("Trying to access not available bitmap " PTR_FORMAT \
" corresponding to " PTR_FORMAT " (%u)", \
p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
"Trying to access not available bitmap " PTR_FORMAT \
" corresponding to " PTR_FORMAT " (%u)", \
p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr));

inline void CMBitMap::mark(HeapWord* addr) {
check_mark(addr);

@ -225,8 +225,7 @@ inline bool CMBitMap::parClear(HeapWord* addr) {

template<typename Fn>
inline void CMMarkStack::iterate(Fn fn) {
assert(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
assert(_saved_index == _index, "saved index: %d index: %d", _saved_index, _index);
for (int i = 0; i < _index; ++i) {
fn(_base[i]);
}

@ -385,7 +384,7 @@ inline void CMTask::deal_with_reference(oop obj) {
increment_refs_reached();

HeapWord* objAddr = (HeapWord*) obj;
assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
assert(obj->is_oop_or_null(true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
if (_g1h->is_in_g1_reserved(objAddr)) {
assert(obj != NULL, "null check is implicit");
if (!_nextMarkBitMap->isMarked(objAddr)) {

@ -427,9 +426,9 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
// assert that word_size is under an upper bound which is its
// containing region's capacity.
assert(word_size * HeapWordSize <= hr->capacity(),
err_msg("size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr)));
"size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr));

if (addr < hr->next_top_at_mark_start()) {
if (!_nextMarkBitMap->isMarked(addr)) {

@ -91,7 +91,7 @@ size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
}

size_t G1AllocRegion::retire(bool fill_up) {
assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
assert_alloc_region(_alloc_region != NULL, "not initialized properly");

size_t result = 0;

@ -101,15 +101,14 @@ size_t G1AllocRegion::retire(bool fill_up) {
// We never have to check whether the active region is empty or not,
// and potentially free it if it is, given that it's guaranteed that
// it will never be empty.
assert(!alloc_region->is_empty(),
ar_ext_msg(this, "the alloc region should never be empty"));
assert_alloc_region(!alloc_region->is_empty(),
"the alloc region should never be empty");

if (fill_up) {
result = fill_up_remaining_space(alloc_region, _bot_updates);
}

assert(alloc_region->used() >= _used_bytes_before,
ar_ext_msg(this, "invariant"));
assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
retire_region(alloc_region, allocated_bytes);
_used_bytes_before = 0;

@ -122,8 +121,8 @@ size_t G1AllocRegion::retire(bool fill_up) {

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
bool force) {
assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
assert_alloc_region(_used_bytes_before == 0, "pre-condition");

trace("attempting region allocation");
HeapRegion* new_alloc_region = allocate_new_region(word_size, force);

@ -132,7 +131,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
// Need to do this before the allocation
_used_bytes_before = new_alloc_region->used();
HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded"));
assert_alloc_region(result != NULL, "the allocation should succeeded");

OrderAccess::storestore();
// Note that we first perform the allocation and then we store the

@ -148,17 +147,10 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
ShouldNotReachHere();
}

void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
msg->append("[%s] %s c: %u b: %s r: " PTR_FORMAT " u: " SIZE_FORMAT,
_name, message, _count, BOOL_TO_STR(_bot_updates),
p2i(_alloc_region), _used_bytes_before);
}

void G1AllocRegion::init() {
trace("initializing");
assert(_alloc_region == NULL && _used_bytes_before == 0,
ar_ext_msg(this, "pre-condition"));
assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
assert_alloc_region(_dummy_region != NULL, "should have been set");
_alloc_region = _dummy_region;
_count = 0;
trace("initialized");

@ -168,11 +160,10 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
trace("setting");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
assert(alloc_region != NULL && !alloc_region->is_empty(),
ar_ext_msg(this, "pre-condition"));
assert(_alloc_region == _dummy_region &&
_used_bytes_before == 0 && _count == 0,
ar_ext_msg(this, "pre-condition"));
assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
assert_alloc_region(_alloc_region == _dummy_region &&
_used_bytes_before == 0 && _count == 0,
"pre-condition");

_used_bytes_before = alloc_region->used();
_alloc_region = alloc_region;

@ -184,8 +175,7 @@ void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
trace("update");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
assert(alloc_region != NULL && !alloc_region->is_empty(),
ar_ext_msg(this, "pre-condition"));
assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

_alloc_region = alloc_region;
_alloc_region->set_allocation_context(allocation_context());

@ -197,8 +187,7 @@ HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
retire(false /* fill_up */);
assert(_alloc_region == _dummy_region,
ar_ext_msg(this, "post-condition of retire()"));
assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
_alloc_region = NULL;
trace("released");
return (alloc_region == _dummy_region) ? NULL : alloc_region;

@ -34,8 +34,6 @@ class G1CollectedHeap;
// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
#define G1_ALLOC_REGION_TRACING 0

class ar_ext_msg;

// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The

@ -44,7 +42,6 @@ class ar_ext_msg;
// replaced.

class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
friend class ar_ext_msg;

private:
// The active allocating region we are currently allocating out

@ -131,8 +128,6 @@ private:
// to allocate a new region even if the max has been reached.
HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

void fill_in_ext_msg(ar_ext_msg* msg, const char* message);

protected:
// Retire the active allocating region. If fill_up is true then make
// sure that the region is full before we retire it so that no one

@ -278,11 +273,4 @@ public:
virtual HeapRegion* release();
};

class ar_ext_msg : public err_msg {
public:
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
alloc_region->fill_in_ext_msg(this, message);
}
};

#endif // SHARE_VM_GC_G1_G1ALLOCREGION_HPP

@ -28,10 +28,18 @@
#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"

#define assert_alloc_region(p, message) \
do { \
assert((p), "[%s] %s c: %u b: %s r: " PTR_FORMAT " u: " SIZE_FORMAT, \
_name, (message), _count, BOOL_TO_STR(_bot_updates), \
p2i(_alloc_region), _used_bytes_before); \
} while (0)


inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
size_t word_size,
bool bot_updates) {
assert(alloc_region != NULL, err_msg("pre-condition"));
assert(alloc_region != NULL, "pre-condition");

if (!bot_updates) {
return alloc_region->allocate_no_bot_updates(word_size);

@ -50,8 +58,8 @@ inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
size_t desired_word_size,
size_t* actual_word_size,
bool bot_updates) {
assert(alloc_region != NULL, err_msg("pre-condition"));
assert(!alloc_region->is_empty(), err_msg("pre-condition"));
assert(alloc_region != NULL, "pre-condition");
assert(!alloc_region->is_empty(), "pre-condition");

if (!bot_updates) {
return alloc_region->par_allocate_no_bot_updates(min_word_size, desired_word_size, actual_word_size);

@ -69,10 +77,10 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size,
bool bot_updates) {
assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
assert_alloc_region(bot_updates == _bot_updates, "pre-condition");

HeapRegion* alloc_region = _alloc_region;
assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
assert_alloc_region(alloc_region != NULL, "not initialized properly");

HeapWord* result = par_allocate(alloc_region, min_word_size, desired_word_size, actual_word_size, bot_updates);
if (result != NULL) {

@ -113,8 +121,8 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t min_word_size,

inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
bool bot_updates) {
assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
assert_alloc_region(bot_updates == _bot_updates, "pre-condition");
assert_alloc_region(_alloc_region != NULL, "not initialized properly");

trace("forcing alloc", word_size, word_size);
HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);

@ -54,7 +54,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
assert(retained_region == NULL || !retained_region->is_archive(),
err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
"Archive region should not be alloc region (index %u)", retained_region->hrm_index());

// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),

@ -147,8 +147,8 @@ HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t temp = 0;
HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
assert(result == NULL || temp == word_size,
err_msg("Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
word_size, temp, p2i(result)));
"Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
word_size, temp, p2i(result));
return result;
}

@ -276,16 +276,16 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
context);

assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
err_msg("Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
required_in_plab, plab_word_size, actual_plab_size, p2i(buf)));
"Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

if (buf != NULL) {
alloc_buf->set_buf(buf, actual_plab_size);

HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, err_msg("PLAB should have been big enough, tried to allocate "
SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
word_sz, required_in_plab, plab_word_size));
assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
word_sz, required_in_plab, plab_word_size);
return obj;
}
// Otherwise.

@ -354,7 +354,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
if (hr == NULL) {
return false;
}
assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
hr->set_archive();
_g1h->old_set_add(hr);
_g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive);

@ -383,15 +383,15 @@ HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
}
HeapWord* old_top = _allocation_region->top();
assert(_bottom >= _allocation_region->bottom(),
err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
p2i(_bottom), p2i(_allocation_region->bottom())));
"inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
p2i(_bottom), p2i(_allocation_region->bottom()));
assert(_max <= _allocation_region->end(),
err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
p2i(_max), p2i(_allocation_region->end())));
"inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
p2i(_max), p2i(_allocation_region->end()));
assert(_bottom <= old_top && old_top <= _max,
err_msg("inconsistent allocation state: expected "
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
p2i(_bottom), p2i(old_top), p2i(_max)));
"inconsistent allocation state: expected "
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
p2i(_bottom), p2i(old_top), p2i(_max));

// Allocate the next word_size words in the current allocation chunk.
// If allocation would cross the _max boundary, insert a filler and begin

@ -430,9 +430,9 @@ HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) {
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
"alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
"alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

// If we've allocated nothing, simply return.
if (_allocation_region == NULL) {

@ -465,8 +465,8 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
// MemRegions to the GrowableArray provided by the caller.
int index = _allocated_regions.length() - 1;
assert(_allocated_regions.at(index) == _allocation_region,
err_msg("expected region %u at end of array, found %u",
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
"expected region %u at end of array, found %u",
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
HeapWord* base_address = _allocation_region->bottom();
HeapWord* top = base_address;

@ -482,7 +482,7 @@ void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
index = index - 1;
}

assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
_allocated_regions.clear();
_allocation_region = NULL;

@ -295,9 +295,9 @@ public:

virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,
err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
"Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
return _alloc_buffers[dest.value()];
}

@ -36,7 +36,7 @@ HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t
HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size, false /* bot_updates */);
assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
err_msg("Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get())));
"Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get()));
return result;
}

@ -36,19 +36,21 @@ address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t ele
#ifndef PRODUCT
void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
guarantee(_base != NULL, "Array not initialized");
guarantee(index < length(), err_msg("Index out of bounds index: " SIZE_FORMAT " length: " SIZE_FORMAT, index, length()));
guarantee(index < length(), "Index out of bounds index: " SIZE_FORMAT " length: " SIZE_FORMAT, index, length());
}

void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index < (bias() + length()),
err_msg("Biased index out of bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT, biased_index, bias(), length()));
"Biased index out of bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT,
biased_index, bias(), length());
}

void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
err_msg("Biased index out of inclusive bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT, biased_index, bias(), length()));
"Biased index out of inclusive bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT,
biased_index, bias(), length());
}

class TestMappedArray : public G1BiasedMappedArray<int> {

@ -65,7 +67,7 @@ public:
REGION_SIZE_IN_WORDS * HeapWordSize);
// Check address calculation (bounds)
assert(array.bottom_address_mapped() == fake_heap,
err_msg("bottom mapped address should be " PTR_FORMAT ", but is " PTR_FORMAT, p2i(fake_heap), p2i(array.bottom_address_mapped())));
"bottom mapped address should be " PTR_FORMAT ", but is " PTR_FORMAT, p2i(fake_heap), p2i(array.bottom_address_mapped()));
assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");

int* bottom = array.address_mapped_to(fake_heap);

@ -56,7 +56,7 @@ protected:
void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
assert(base != NULL, "just checking");
assert(length > 0, "just checking");
assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %u, larger than word size?", shift_by));
assert(shift_by < sizeof(uintptr_t) * 8, "Shifting by %u, larger than word size?", shift_by);
_base = base;
_length = length;
_biased_base = base - (bias * elem_size);

@ -69,13 +69,13 @@ protected:
void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
assert(mapping_granularity_in_bytes > 0, "just checking");
assert(is_power_of_2(mapping_granularity_in_bytes),
err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
"mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes);
assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is " PTR_FORMAT,
mapping_granularity_in_bytes, p2i(bottom)));
"bottom mapping area address must be a multiple of mapping granularity %zd, is " PTR_FORMAT,
mapping_granularity_in_bytes, p2i(bottom));
assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
err_msg("end mapping area address must be a multiple of mapping granularity %zd, is " PTR_FORMAT,
mapping_granularity_in_bytes, p2i(end)));
"end mapping area address must be a multiple of mapping granularity %zd, is " PTR_FORMAT,
mapping_granularity_in_bytes, p2i(end));
size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes);
idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
@ -69,14 +69,14 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
|
||||
#ifdef ASSERT
|
||||
void G1BlockOffsetSharedArray::check_index(size_t index, const char* msg) const {
|
||||
assert((index) < (_reserved.word_size() >> LogN_words),
|
||||
err_msg("%s - index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
|
||||
msg, (index), (_reserved.word_size() >> LogN_words)));
|
||||
"%s - index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
|
||||
msg, (index), (_reserved.word_size() >> LogN_words));
|
||||
assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),
|
||||
err_msg("Index " SIZE_FORMAT " corresponding to " PTR_FORMAT
|
||||
" (%u) is not in committed area.",
|
||||
(index),
|
||||
p2i(address_for_index_raw(index)),
|
||||
G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
|
||||
"Index " SIZE_FORMAT " corresponding to " PTR_FORMAT
|
||||
" (%u) is not in committed area.",
|
||||
(index),
|
||||
p2i(address_for_index_raw(index)),
|
||||
G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index)));
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
@ -192,27 +192,27 @@ void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) con
u_char entry = _array->offset_array(c);
if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
guarantee(entry > N_words,
err_msg("Should be in logarithmic region - "
"entry: %u, "
"_array->offset_array(c): %u, "
"N_words: %u",
(uint)entry, (uint)_array->offset_array(c), (uint)N_words));
"Should be in logarithmic region - "
"entry: %u, "
"_array->offset_array(c): %u, "
"N_words: %u",
(uint)entry, (uint)_array->offset_array(c), (uint)N_words);
}
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry,
err_msg("Monotonicity - landing_card offset: %u, "
"entry: %u",
(uint)_array->offset_array(landing_card), (uint)entry));
"Monotonicity - landing_card offset: %u, "
"entry: %u",
(uint)_array->offset_array(landing_card), (uint)entry);
} else {
guarantee(landing_card == start_card - 1, "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= N_words,
err_msg("landing card offset: %u, "
"N_words: %u",
(uint)_array->offset_array(landing_card), (uint)N_words));
"landing card offset: %u, "
"N_words: %u",
(uint)_array->offset_array(landing_card), (uint)N_words);
}
}
}

@ -271,9 +271,9 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
HeapWord* next_boundary = _array->address_for_index(n_index) +
(n_index == next_index ? 0 : N_words);
assert(next_boundary <= _array->_end,
err_msg("next_boundary is beyond the end of the covered region "
" next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
p2i(next_boundary), p2i(_array->_end)));
"next_boundary is beyond the end of the covered region "
" next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
p2i(next_boundary), p2i(_array->_end));
if (addr >= gsp()->top()) return gsp()->top();
while (next_boundary < addr) {
while (n <= next_boundary) {

@ -361,25 +361,23 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
// is checked by an assertion above.
size_t start_index = _array->index_for(blk_start);
HeapWord* boundary = _array->address_for_index(start_index);
assert((_array->offset_array(orig_index) == 0 &&
blk_start == boundary) ||
(_array->offset_array(orig_index) > 0 &&
_array->offset_array(orig_index) <= N_words),
err_msg("offset array should have been set - "
"orig_index offset: %u, "
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
(uint)_array->offset_array(orig_index),
p2i(blk_start), p2i(boundary)));
assert((_array->offset_array(orig_index) == 0 && blk_start == boundary) ||
(_array->offset_array(orig_index) > 0 && _array->offset_array(orig_index) <= N_words),
"offset array should have been set - "
"orig_index offset: %u, "
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
(uint)_array->offset_array(orig_index),
p2i(blk_start), p2i(boundary));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
(u_char) (N_words+BlockOffsetArray::N_powers-1),
err_msg("offset array should have been set - "
"%u not > 0 OR %u not <= %u",
(uint) _array->offset_array(j),
(uint) _array->offset_array(j),
(uint) (N_words+BlockOffsetArray::N_powers-1)));
"offset array should have been set - "
"%u not > 0 OR %u not <= %u",
(uint) _array->offset_array(j),
(uint) _array->offset_array(j),
(uint) (N_words+BlockOffsetArray::N_powers-1));
}
#endif
}
@ -402,8 +400,8 @@ void G1BlockOffsetArray::verify() const {
size_t obj_size = block_size(obj);
obj_end = obj + obj_size;
guarantee(obj_end > obj && obj_end <= gsp()->top(),
err_msg("Invalid object end. obj: " PTR_FORMAT " obj_size: " SIZE_FORMAT " obj_end: " PTR_FORMAT " top: " PTR_FORMAT,
p2i(obj), obj_size, p2i(obj_end), p2i(gsp()->top())));
"Invalid object end. obj: " PTR_FORMAT " obj_size: " SIZE_FORMAT " obj_end: " PTR_FORMAT " top: " PTR_FORMAT,
p2i(obj), obj_size, p2i(obj_end), p2i(gsp()->top()));
}
} else {
// Because we refine the BOT based on which cards are dirty there is not much we can verify here.

@ -414,13 +412,13 @@ void G1BlockOffsetArray::verify() const {
size_t max_backskip = current_card - start_card;
guarantee(backskip <= max_backskip,
err_msg("Going backwards beyond the start_card. start_card: " SIZE_FORMAT " current_card: " SIZE_FORMAT " backskip: " SIZE_FORMAT,
start_card, current_card, backskip));
"Going backwards beyond the start_card. start_card: " SIZE_FORMAT " current_card: " SIZE_FORMAT " backskip: " SIZE_FORMAT,
start_card, current_card, backskip);
HeapWord* backskip_address = _array->address_for_index(current_card - backskip);
guarantee(backskip_address >= gsp()->bottom(),
err_msg("Going backwards beyond bottom of the region: bottom: " PTR_FORMAT ", backskip_address: " PTR_FORMAT,
p2i(gsp()->bottom()), p2i(backskip_address)));
"Going backwards beyond bottom of the region: bottom: " PTR_FORMAT ", backskip_address: " PTR_FORMAT,
p2i(gsp()->bottom()), p2i(backskip_address));
}
}
}

@ -80,8 +80,8 @@ public:
virtual void set_bottom(HeapWord* new_bottom) {
assert(new_bottom <= _end,
err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
p2i(new_bottom), p2i(_end)));
"new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
p2i(new_bottom), p2i(_end));
_bottom = new_bottom;
resize(pointer_delta(_end, _bottom));
}

@ -149,9 +149,8 @@ private:
void check_offset(size_t offset, const char* msg) const {
assert(offset <= N_words,
err_msg("%s - "
"offset: " SIZE_FORMAT ", N_words: %u",
msg, offset, (uint)N_words));
"%s - offset: " SIZE_FORMAT ", N_words: %u",
msg, offset, (uint)N_words);
}
// Bounds checking accessors:

@ -81,8 +81,8 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
"p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(p), p2i(_reserved.start()), p2i(_reserved.end()));
size_t result = index_for_raw(p);
check_index(result, "bad index from address");
return result;

@ -93,10 +93,9 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
check_index(index, "index out of range");
HeapWord* result = address_for_index_raw(index);
assert(result >= _reserved.start() && result < _reserved.end(),
err_msg("bad address from index result " PTR_FORMAT
" _reserved.start() " PTR_FORMAT " _reserved.end() "
PTR_FORMAT,
p2i(result), p2i(_reserved.start()), p2i(_reserved.end())));
"bad address from index result " PTR_FORMAT
" _reserved.start() " PTR_FORMAT " _reserved.end() " PTR_FORMAT,
p2i(result), p2i(_reserved.start()), p2i(_reserved.end()));
return result;
}
@ -53,8 +53,8 @@ size_t G1CardCounts::heap_map_factor() {
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
if (has_count_table()) {
assert(from_card_num < to_card_num,
err_msg("Wrong order? from: " SIZE_FORMAT ", to: " SIZE_FORMAT,
from_card_num, to_card_num));
"Wrong order? from: " SIZE_FORMAT ", to: " SIZE_FORMAT,
from_card_num, to_card_num);
Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
}
}

@ -96,8 +96,8 @@ uint G1CardCounts::add_card_count(jbyte* card_ptr) {
if (has_count_table()) {
size_t card_num = ptr_2_card_num(card_ptr);
assert(card_num < _reserved_max_card_num,
err_msg("Card " SIZE_FORMAT " outside of card counts table (max size " SIZE_FORMAT ")",
card_num, _reserved_max_card_num));
"Card " SIZE_FORMAT " outside of card counts table (max size " SIZE_FORMAT ")",
card_num, _reserved_max_card_num);
count = (uint) _card_counts[card_num];
if (count < G1ConcRSHotCardLimit) {
_card_counts[card_num] =

@ -79,19 +79,19 @@ class G1CardCounts: public CHeapObj<mtGC> {
size_t ptr_2_card_num(const jbyte* card_ptr) {
assert(card_ptr >= _ct_bot,
err_msg("Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
p2i(card_ptr), p2i(_ct_bot)));
"Invalid card pointer: "
"card_ptr: " PTR_FORMAT ", "
"_ct_bot: " PTR_FORMAT,
p2i(card_ptr), p2i(_ct_bot));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
assert(card_num < _reserved_max_card_num,
err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
"card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
assert(card_num < _reserved_max_card_num,
err_msg("card num out of range: " SIZE_FORMAT, card_num));
"card num out of range: " SIZE_FORMAT, card_num);
return (jbyte*) (_ct_bot + card_num);
}

@ -352,11 +352,11 @@ class G1CodeRootSetTest {
assert(set1.is_empty(), "Code root set must be initially empty but is not.");
assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
err_msg("The code root set's static memory usage is incorrect, " SIZE_FORMAT " bytes", G1CodeRootSet::static_mem_size()));
"The code root set's static memory usage is incorrect, " SIZE_FORMAT " bytes", G1CodeRootSet::static_mem_size());
set1.add((nmethod*)1);
assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
SIZE_FORMAT " elements", set1.length()));
assert(set1.length() == 1, "Added exactly one element, but set contains "
SIZE_FORMAT " elements", set1.length());
const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;

@ -364,16 +364,16 @@ class G1CodeRootSetTest {
set1.add((nmethod*)1);
}
assert(set1.length() == 1,
err_msg("Duplicate detection should not have increased the set size but "
"is " SIZE_FORMAT, set1.length()));
"Duplicate detection should not have increased the set size but "
"is " SIZE_FORMAT, set1.length());
for (size_t i = 2; i <= num_to_add; i++) {
set1.add((nmethod*)(uintptr_t)(i));
}
assert(set1.length() == num_to_add,
err_msg("After adding in total " SIZE_FORMAT " distinct code roots, they "
"need to be in the set, but there are only " SIZE_FORMAT,
num_to_add, set1.length()));
"After adding in total " SIZE_FORMAT " distinct code roots, they "
"need to be in the set, but there are only " SIZE_FORMAT,
num_to_add, set1.length());
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");

@ -387,8 +387,8 @@ class G1CodeRootSetTest {
}
}
assert(num_popped == num_to_add,
err_msg("Managed to pop " SIZE_FORMAT " code roots, but only " SIZE_FORMAT " "
"were added", num_popped, num_to_add));
"Managed to pop " SIZE_FORMAT " code roots, but only " SIZE_FORMAT " "
"were added", num_popped, num_to_add);
assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
G1CodeRootSet::purge();
@ -971,11 +971,11 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
size_t commits = 0;
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
guarantee(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
prev_last_addr = last_address;
// Check for ranges that start in the same G1 region in which the previous

@ -1017,7 +1017,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
while (curr_region != NULL) {
assert(curr_region->is_empty() && !curr_region->is_pinned(),
err_msg("Region already in use (index %u)", curr_region->hrm_index()));
"Region already in use (index %u)", curr_region->hrm_index());
_hr_printer.alloc(curr_region, G1HRPrinter::Archive);
curr_region->set_allocation_context(AllocationContext::system());
curr_region->set_archive();

@ -1055,11 +1055,11 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);

@ -1076,7 +1076,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
"Expected archive region at index %u", curr_region->hrm_index());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {

@ -1139,11 +1139,11 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address)));
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr)));
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
size_used += ranges[i].byte_size();
prev_last_addr = last_address;

@ -1168,7 +1168,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
"Expected archive region at index %u", curr_region->hrm_index());
uint curr_index = curr_region->hrm_index();
_old_set.remove(curr_region);
curr_region->set_free();

@ -1755,9 +1755,9 @@ resize_if_necessary_after_full_collection(size_t word_size) {
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
"maximum_desired_capacity = " SIZE_FORMAT,
minimum_desired_capacity, maximum_desired_capacity));
"minimum_desired_capacity = " SIZE_FORMAT ", "
"maximum_desired_capacity = " SIZE_FORMAT,
minimum_desired_capacity, maximum_desired_capacity);
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,

@ -2538,9 +2538,9 @@ void G1CollectedHeap::allocate_dummy_regions() {
void G1CollectedHeap::increment_old_marking_cycles_started() {
assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
_old_marking_cycles_started == _old_marking_cycles_completed + 1,
err_msg("Wrong marking cycle count (started: %d, completed: %d)",
_old_marking_cycles_started, _old_marking_cycles_completed));
_old_marking_cycles_started == _old_marking_cycles_completed + 1,
"Wrong marking cycle count (started: %d, completed: %d)",
_old_marking_cycles_started, _old_marking_cycles_completed);
_old_marking_cycles_started++;
}
@ -2564,17 +2564,17 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
assert(concurrent ||
(_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
(_old_marking_cycles_started == _old_marking_cycles_completed + 2),
err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed));
"for inner caller (Full GC): _old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed);
// This is the case for the outer caller, i.e. the concurrent cycle.
assert(!concurrent ||
(_old_marking_cycles_started == _old_marking_cycles_completed + 1),
err_msg("for outer caller (concurrent cycle): "
"_old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed));
"for outer caller (concurrent cycle): "
"_old_marking_cycles_started = %u "
"is inconsistent with _old_marking_cycles_completed = %u",
_old_marking_cycles_started, _old_marking_cycles_completed);
_old_marking_cycles_completed += 1;

@ -3124,7 +3124,7 @@ class VerifyKlassClosure: public KlassClosure {
_young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure);
if (_young_ref_counter_closure.count() > 0) {
guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k)));
guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
}
}
};

@ -3194,8 +3194,8 @@ public:
template <class T> void do_oop_work(T *p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj)));
"Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj));
}
};

@ -4709,11 +4709,11 @@ public:
~G1StringSymbolTableUnlinkTask() {
guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
err_msg("claim value %d after unlink less than initial string table size %d",
StringTable::parallel_claimed_index(), _initial_string_table_size));
"claim value %d after unlink less than initial string table size %d",
StringTable::parallel_claimed_index(), _initial_string_table_size);
guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
err_msg("claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
if (G1TraceStringSymbolTableScrubbing) {
gclog_or_tty->print_cr("Cleaned string and symbol table, "

@ -5113,7 +5113,7 @@ public:
} else {
assert(!obj->is_forwarded(), "invariant" );
assert(cset_state.is_humongous(),
err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
"Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
_g1->set_humongous_is_live(obj);
}
}

@ -5167,7 +5167,7 @@ public:
_par_scan_state->push_on_queue(p);
} else {
assert(!Metaspace::contains((const void*)p),
err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p)));
"Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p));
_copy_non_heap_obj_cl->do_oop(p);
}
}

@ -5823,7 +5823,7 @@ void G1CollectedHeap::verify_dirty_young_regions() {
bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
HeapWord* tams, HeapWord* end) {
guarantee(tams <= end,
err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));
"tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
if (result < end) {
gclog_or_tty->cr();
@ -6174,9 +6174,8 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
}
guarantee(obj->is_typeArray(),
err_msg("Only eagerly reclaiming type arrays is supported, but the object "
PTR_FORMAT " is not.",
p2i(r->bottom())));
"Only eagerly reclaiming type arrays is supported, but the object "
PTR_FORMAT " is not.", p2i(r->bottom()));
if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",

@ -6405,8 +6404,8 @@ void G1CollectedHeap::increase_used(size_t bytes) {
void G1CollectedHeap::decrease_used(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
_summary_bytes_used, bytes));
"invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
_summary_bytes_used, bytes);
_summary_bytes_used -= bytes;
}

@ -6488,9 +6487,9 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
}
}
assert(used_unlocked() == recalculate_used(),
err_msg("inconsistent used_unlocked(), "
"value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
used_unlocked(), recalculate_used()));
"inconsistent used_unlocked(), "
"value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
used_unlocked(), recalculate_used());
}
void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {

@ -6631,35 +6630,35 @@ public:
if (hr->is_young()) {
// TODO
} else if (hr->is_starts_humongous()) {
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
_free_count.increment(1u, hr->capacity());
} else if (hr->is_old()) {
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
_old_count.increment(1u, hr->capacity());
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
ShouldNotReachHere();
}
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
old_set->total_capacity_bytes(), _old_count.capacity()));
guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
old_set->total_capacity_bytes(), _old_count.capacity());
guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length());
guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
humongous_set->total_capacity_bytes(), _humongous_count.capacity());
guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
free_list->total_capacity_bytes(), _free_count.capacity()));
guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length());
guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
free_list->total_capacity_bytes(), _free_count.capacity());
}
};

@ -6715,9 +6714,9 @@ class RegisterNMethodOopClosure: public OopClosure {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
assert(!hr->is_continues_humongous(),
err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
"trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
// HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
hr->add_strong_code_root_locked(_nm);

@ -6742,9 +6741,9 @@ class UnregisterNMethodOopClosure: public OopClosure {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing(obj);
assert(!hr->is_continues_humongous(),
err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
"trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
hr->remove_strong_code_root(_nm);
}
@ -368,17 +368,17 @@ private:
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
#define heap_locking_asserts_err_msg(_extra_message_) \
err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
(_extra_message_), \
BOOL_TO_STR(Heap_lock->owned_by_self()), \
BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
BOOL_TO_STR(Thread::current()->is_VM_thread()))
#define heap_locking_asserts_params(_extra_message_) \
"%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
(_extra_message_), \
BOOL_TO_STR(Heap_lock->owned_by_self()), \
BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
BOOL_TO_STR(Thread::current()->is_VM_thread())
#define assert_heap_locked() \
do { \
assert(Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
heap_locking_asserts_params("should be holding the Heap_lock")); \
} while (0)
#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \

@ -386,7 +386,7 @@ private:
assert(Heap_lock->owned_by_self() || \
(SafepointSynchronize::is_at_safepoint() && \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
heap_locking_asserts_params("should be holding the Heap_lock or " \
"should be at a safepoint")); \
} while (0)

@ -394,21 +394,21 @@ private:
do { \
assert(Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
heap_locking_asserts_params("should be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)
#define assert_heap_not_locked() \
do { \
assert(!Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
heap_locking_asserts_params("should not be holding the Heap_lock")); \
} while (0)
#define assert_heap_not_locked_and_not_at_safepoint() \
do { \
assert(!Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
heap_locking_asserts_params("should not be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)

@ -416,13 +416,13 @@ private:
do { \
assert(SafepointSynchronize::is_at_safepoint() && \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
heap_locking_asserts_err_msg("should be at a safepoint")); \
heap_locking_asserts_params("should be at a safepoint")); \
} while (0)
#define assert_not_at_safepoint() \
do { \
assert(!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be at a safepoint")); \
heap_locking_asserts_params("should not be at a safepoint")); \
} while (0)
protected:

@ -67,8 +67,8 @@ inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
err_msg("Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
"Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

@ -80,8 +80,8 @@ template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(addr != NULL, "invariant");
assert(is_in_g1_reserved((const void*) addr),
err_msg("Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
return _hrm.addr_to_region((HeapWord*) addr);
}
|
@ -813,8 +813,8 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
|
||||
update_survivors_policy();
|
||||
|
||||
assert(_g1->used() == _g1->recalculate_used(),
|
||||
err_msg("sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
|
||||
_g1->used(), _g1->recalculate_used()));
|
||||
"sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
|
||||
_g1->used(), _g1->recalculate_used());
|
||||
|
||||
double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
|
||||
_trace_young_gen_time_data.record_start_collection(s_w_t_ms);
|
||||
@ -1876,8 +1876,7 @@ double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms)
|
||||
finalize_incremental_cset_building();
|
||||
|
||||
guarantee(target_pause_time_ms > 0.0,
|
||||
err_msg("target_pause_time_ms = %1.6lf should be positive",
|
||||
target_pause_time_ms));
|
||||
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
|
||||
guarantee(_collection_set == NULL, "Precondition");
|
||||
|
||||
double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
|
||||
|
@ -839,7 +839,7 @@ public:
|
||||
case InCSetState::Old:
|
||||
return REGIONS_UNLIMITED;
|
||||
default:
|
||||
assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
|
||||
assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
|
||||
break;
|
||||
}
|
||||
// keep some compilers happy
|
||||
|
@ -165,9 +165,9 @@ public:
|
||||
size_t size_second_obj = ((oop)end_first_obj)->size();
|
||||
HeapWord* end_of_second_obj = end_first_obj + size_second_obj;
|
||||
assert(end == end_of_second_obj,
|
||||
err_msg("More than two objects were used to fill the area from " PTR_FORMAT " to " PTR_FORMAT ", "
|
||||
"second objects size " SIZE_FORMAT " ends at " PTR_FORMAT,
|
||||
p2i(start), p2i(end), size_second_obj, p2i(end_of_second_obj)));
|
||||
"More than two objects were used to fill the area from " PTR_FORMAT " to " PTR_FORMAT ", "
|
||||
"second objects size " SIZE_FORMAT " ends at " PTR_FORMAT,
|
||||
p2i(start), p2i(end), size_second_obj, p2i(end_of_second_obj));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
@ -215,7 +215,7 @@ public:
|
||||
bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
|
||||
bool during_conc_mark = _g1h->collector_state()->mark_in_progress();
|
||||
|
||||
assert(!hr->is_pinned(), err_msg("Unexpected pinned region at index %u", hr->hrm_index()));
|
||||
assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
|
||||
assert(hr->in_collection_set(), "bad CS");
|
||||
|
||||
if (_hrclaimer->claim_region(hr->hrm_index())) {
|
||||
|
@ -45,13 +45,13 @@ void G1EvacStats::adjust_desired_plab_sz() {
|
||||
|
||||
if (_allocated == 0) {
|
||||
assert((_unused == 0),
|
||||
err_msg("Inconsistency in PLAB stats: "
|
||||
"_allocated: " SIZE_FORMAT ", "
|
||||
"_wasted: " SIZE_FORMAT ", "
|
||||
"_region_end_waste: " SIZE_FORMAT ", "
|
||||
"_unused: " SIZE_FORMAT ", "
|
||||
"_used : " SIZE_FORMAT,
|
||||
_allocated, _wasted, _region_end_waste, _unused, used()));
|
||||
"Inconsistency in PLAB stats: "
|
||||
"_allocated: " SIZE_FORMAT ", "
|
||||
"_wasted: " SIZE_FORMAT ", "
|
||||
"_region_end_waste: " SIZE_FORMAT ", "
|
||||
"_unused: " SIZE_FORMAT ", "
|
||||
"_used : " SIZE_FORMAT,
|
||||
_allocated, _wasted, _region_end_waste, _unused, used());
|
||||
_allocated = 1;
|
||||
}
|
||||
// The size of the PLAB caps the amount of space that can be wasted at the
|
||||
|
@ -130,8 +130,8 @@ class WorkerDataArray : public CHeapObj<mtGC> {
WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }
void set(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));
assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), "Overwriting data for worker %d in %s", worker_i, _title);
_data[worker_i] = value;
_has_new_data = true;
}

@ -142,14 +142,14 @@ class WorkerDataArray : public CHeapObj<mtGC> {
}
T get(uint worker_i) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));
assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), "No data added for worker %d", worker_i);
return _data[worker_i];
}
void add(uint worker_i, T value) {
assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), "No data to add to for worker %d", worker_i);
_data[worker_i] += value;
_has_new_data = true;
}

@ -235,7 +235,7 @@ void WorkerDataArray<T>::verify(uint active_threads) {
assert(active_threads <= _length, "Wrong number of active threads");
for (uint i = 0; i < active_threads; i++) {
assert(_data[i] != WorkerDataArray<T>::uninitialized(),
err_msg("Invalid data for worker %u in '%s'", i, _title));
"Invalid data for worker %u in '%s'", i, _title);
}
if (_thread_work_items != NULL) {
_thread_work_items->verify(active_threads);

@ -479,7 +479,7 @@ class G1GCParPhasePrinter : public StackObj {
print_count_values(buf, phase_id, thread_work_items);
}
assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));
assert(thread_work_items->_print_sum, "%s does not have print sum true even though it is a count", thread_work_items->_title);
buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
_phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
Some files were not shown because too many files have changed in this diff.
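Every hunk shown above follows the same pattern: a call such as assert(cond, err_msg("fmt", args)) becomes assert(cond, "fmt", args), i.e. the format string and its arguments are handed straight to the assertion macro. The following is a minimal sketch of that calling convention, assuming a hypothetical my_assert macro and report_assert_failure helper; it is not HotSpot's actual debug.hpp machinery.

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

// Hypothetical reporting helper -- HotSpot's real report_vm_error path differs.
static void report_assert_failure(const char* file, int line, const char* expr,
                                  const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  std::fprintf(stderr, "assert(%s) failed: ", expr);
  std::vfprintf(stderr, fmt, ap);          // format the detail message directly
  std::fprintf(stderr, " (%s:%d)\n", file, line);
  va_end(ap);
  std::abort();
}

// The macro forwards the format string and its arguments unchanged, so a call
// site can write: my_assert(x != 0, "x is %d, expected nonzero", x);
#define my_assert(p, ...)                                               \
  do {                                                                  \
    if (!(p)) {                                                         \
      report_assert_failure(__FILE__, __LINE__, #p, __VA_ARGS__);       \
    }                                                                   \
  } while (0)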