commit e082ba12fc
Merge
@@ -161,8 +161,8 @@ AC_DEFUN_ONCE([LIB_SETUP_ZLIB],
   AC_MSG_CHECKING([for which zlib to use])
 
   DEFAULT_ZLIB=system
-  if test "x$OPENJDK_TARGET_OS" = xwindows; then
-    # On windows default is bundled...on others default is system
+  if test "x$OPENJDK_TARGET_OS" = xwindows -o "x$OPENJDK_TARGET_OS" = xaix; then
+    # On windows and aix default is bundled, on others default is system
     DEFAULT_ZLIB=bundled
   fi
 
@@ -100,12 +100,17 @@ define SetupJvmtiGeneration
 endef
 
 $(eval $(call SetupJvmtiGeneration, jvmtiEnter.cpp, jvmtiEnter.xsl, \
     -PARAM majorversion $(VERSION_FEATURE) \
     -PARAM interface jvmti))
 $(eval $(call SetupJvmtiGeneration, jvmtiEnterTrace.cpp, jvmtiEnter.xsl, \
     -PARAM majorversion $(VERSION_FEATURE) \
     -PARAM interface jvmti -PARAM trace Trace))
-$(eval $(call SetupJvmtiGeneration, jvmtiEnv.hpp, jvmtiHpp.xsl))
-$(eval $(call SetupJvmtiGeneration, jvmti.h, jvmtiH.xsl))
-$(eval $(call SetupJvmtiGeneration, jvmti.html, jvmti.xsl))
+$(eval $(call SetupJvmtiGeneration, jvmtiEnv.hpp, jvmtiHpp.xsl, \
+    -PARAM majorversion $(VERSION_FEATURE)))
+$(eval $(call SetupJvmtiGeneration, jvmti.h, jvmtiH.xsl, \
+    -PARAM majorversion $(VERSION_FEATURE)))
+$(eval $(call SetupJvmtiGeneration, jvmti.html, jvmti.xsl, \
+    -PARAM majorversion $(VERSION_FEATURE)))
 
 JVMTI_BC_SRCDIR := $(TOPDIR)/src/hotspot/share/interpreter
@@ -1413,7 +1413,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
         out_sig_bt[argc++] = in_sig_bt[i];
       }
     } else {
-      Thread* THREAD = Thread::current();
       in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
       SignatureStream ss(method->signature());
       for (int i = 0; i < total_in_args ; i++ ) {
@@ -1421,7 +1420,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
           // Arrays are passed as int, elem* pair
           out_sig_bt[argc++] = T_INT;
           out_sig_bt[argc++] = T_ADDRESS;
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1908,14 +1908,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
         out_sig_bt[argc++] = in_sig_bt[i];
       }
     } else {
-      Thread* THREAD = Thread::current();
       in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
       SignatureStream ss(method->signature());
       int o = 0;
       for (int i = 0; i < total_in_args ; i++, o++) {
         if (in_sig_bt[i] == T_ARRAY) {
           // Arrays are passed as int, elem* pair
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1618,14 +1618,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
         out_sig_bt[argc++] = in_sig_bt[i];
       }
     } else {
-      Thread* THREAD = Thread::current();
       in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
       SignatureStream ss(method->signature());
       int o = 0;
       for (int i = 0; i < total_in_args; i++, o++) {
         if (in_sig_bt[i] == T_ARRAY) {
           // Arrays are passed as tuples (int, elem*).
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1906,7 +1906,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
           // Arrays are passed as int, elem* pair
           out_sig_bt[argc++] = T_INT;
           out_sig_bt[argc++] = T_ADDRESS;
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -159,10 +159,10 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
   NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
 
 #ifdef ASSERT
-  // read the value once
-  volatile intptr_t data = method_holder->data();
-  volatile address destination = jump->jump_destination();
-  assert(data == 0 || data == (intptr_t)callee(),
+  Method* old_method = reinterpret_cast<Method*>(method_holder->data());
+  address destination = jump->jump_destination();
+  assert(old_method == NULL || old_method == callee() ||
+         !old_method->method_holder()->is_loader_alive(),
          "a) MT-unsafe modification of inline cache");
   assert(destination == (address)-1 || destination == entry,
          "b) MT-unsafe modification of inline cache");
@@ -30,6 +30,7 @@
 #include "interpreter/interp_masm.hpp"
 #include "memory/universe.hpp"
 #include "runtime/jniHandles.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 
 #define __ masm->
@@ -344,3 +345,33 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
   __ bind(continuation);
 #endif
 }
+
+void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
+  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs == NULL) {
+    return;
+  }
+
+  Label bad_call;
+  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
+  __ jcc(Assembler::equal, bad_call);
+
+  // Pointer chase to the method holder to find out if the method is concurrently unloading.
+  Label method_live;
+  __ load_method_holder_cld(rscratch1, rbx);
+
+  // Is it a strong CLD?
+  __ movl(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
+  __ cmpptr(rscratch2, 0);
+  __ jcc(Assembler::greater, method_live);
+
+  // Is it a weak but alive CLD?
+  __ movptr(rscratch1, Address(rscratch1, ClassLoaderData::holder_offset()));
+  __ resolve_weak_handle(rscratch1, rscratch2);
+  __ cmpptr(rscratch1, 0);
+  __ jcc(Assembler::notEqual, method_live);
+
+  __ bind(bad_call);
+  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+  __ bind(method_live);
+}
@@ -85,6 +85,7 @@ public:
   virtual void barrier_stubs_init() {}
 
   virtual void nmethod_entry_barrier(MacroAssembler* masm);
+  virtual void c2i_entry_barrier(MacroAssembler* masm);
 };
 
 #endif // CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP
@@ -5175,6 +5175,23 @@ void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
                  result, Address(result, 0), tmp, /*tmp_thread*/noreg);
 }
 
+// ((WeakHandle)result).resolve();
+void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
+  assert_different_registers(rresult, rtmp);
+  Label resolved;
+
+  // A null weak handle resolves to null.
+  cmpptr(rresult, 0);
+  jcc(Assembler::equal, resolved);
+
+  // Only 64 bit platforms support GCs that require a tmp register
+  // Only IN_HEAP loads require a thread_tmp register
+  // WeakHandle::resolve is an indirection like jweak.
+  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
+                 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg);
+  bind(resolved);
+}
+
 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
   // get mirror
   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
@@ -5185,6 +5202,13 @@ void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp)
   resolve_oop_handle(mirror, tmp);
 }
 
+void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
+  movptr(rresult, Address(rmethod, Method::const_offset()));
+  movptr(rresult, Address(rresult, ConstMethod::constants_offset()));
+  movptr(rresult, Address(rresult, ConstantPool::pool_holder_offset_in_bytes()));
+  movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
+}
+
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
   if (UseCompressedClassPointers) {
@@ -313,7 +313,9 @@ class MacroAssembler: public Assembler {
   void testbool(Register dst);
 
   void resolve_oop_handle(Register result, Register tmp = rscratch2);
+  void resolve_weak_handle(Register result, Register tmp);
   void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
+  void load_method_holder_cld(Register rresult, Register rmethod);
 
   // oop manipulations
   void load_klass(Register dst, Register src);
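Taken together, the x86 hunks above wire concurrent class unloading into c2i adapters: the barrier chases from the incoming Method* to its holder's ClassLoaderData, admits the call if the CLD is strongly kept alive or if its weak holder handle still resolves to a live object, and otherwise bails out through the wrong-method stub. Below is a rough C++ rendering of that decision for orientation only; the struct and field names are illustrative stand-ins, not HotSpot API.

// Illustrative stand-ins for the structures the stub walks; the field
// names mirror the offsets used above but are not real HotSpot types.
struct WeakHandleStub { void* referent; };
struct ClassLoaderDataStub {
  int             keep_alive;  // > 0 while the CLD is strongly reachable
  WeakHandleStub* holder;      // weak handle to the class loader oop
};

// Mirrors the barrier's decision: a method may only be entered if its
// holder's class loader data is still alive.
bool method_holder_is_live(const ClassLoaderDataStub* cld) {
  if (cld->keep_alive > 0) {
    return true;  // strong CLD
  }
  // Weak but alive? A null weak handle resolves to null.
  return cld->holder != nullptr && cld->holder->referent != nullptr;
}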
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1593,7 +1593,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
         out_sig_bt[argc++] = in_sig_bt[i];
       }
     } else {
-      Thread* THREAD = Thread::current();
       in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
       SignatureStream ss(method->signature());
       for (int i = 0; i < total_in_args ; i++ ) {
@@ -1601,7 +1600,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
           // Arrays are passed as int, elem* pair
           out_sig_bt[argc++] = T_INT;
           out_sig_bt[argc++] = T_ADDRESS;
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -971,6 +971,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
 
   address c2i_entry = __ pc();
 
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->c2i_entry_barrier(masm);
+
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
@@ -1968,7 +1971,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
         out_sig_bt[argc++] = in_sig_bt[i];
       }
     } else {
-      Thread* THREAD = Thread::current();
       in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
       SignatureStream ss(method->signature());
       for (int i = 0; i < total_in_args ; i++ ) {
@@ -1976,7 +1978,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
           // Arrays are passed as int, elem* pair
           out_sig_bt[argc++] = T_INT;
           out_sig_bt[argc++] = T_ADDRESS;
-          Symbol* atype = ss.as_symbol(CHECK_NULL);
+          Symbol* atype = ss.as_symbol();
           const char* at = atype->as_C_string();
           if (strlen(at) == 2) {
             assert(at[0] == '[', "must be");
@@ -32,6 +32,7 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
+#include "utilities/virtualizationSupport.hpp"
 #include "vm_version_x86.hpp"
 
 
@@ -1581,6 +1582,7 @@ void VM_Version::print_platform_virtualization_info(outputStream* st) {
     st->print_cr("KVM virtualization detected");
   } else if (vrt == VMWare) {
     st->print_cr("VMWare virtualization detected");
+    VirtualizationSupport::print_virtualization_info(st);
   } else if (vrt == HyperV) {
     st->print_cr("HyperV virtualization detected");
   }
@@ -1684,6 +1686,8 @@ void VM_Version::check_virtualizations() {
 
     if (strncmp("VMwareVMware", signature, 12) == 0) {
       Abstract_VM_Version::_detected_virtualization = VMWare;
+      // check for extended metrics from guestlib
+      VirtualizationSupport::initialize();
     }
 
     if (strncmp("Microsoft Hv", signature, 12) == 0) {
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1389,12 +1389,15 @@ void os::print_os_info(outputStream* st) {
 
   os::Posix::print_rlimit_info(st);
 
+  // _SC_THREAD_THREADS_MAX is the maximum number of threads within a process.
+  long tmax = sysconf(_SC_THREAD_THREADS_MAX);
+  st->print_cr("maximum #threads within a process:%ld", tmax);
+
   // load average
   st->print("load average:");
   double loadavg[3] = {-1.L, -1.L, -1.L};
   os::loadavg(loadavg, 3);
-  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
-  st->cr();
+  st->print_cr("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
 
   // print wpar info
   libperfstat::wparinfo_t wi;
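The AIX hunk above also folds the separate print()/cr() pair into one print_cr() call for the 1-, 5- and 15-minute load averages. A minimal standalone sketch of the same output, assuming a platform that provides the BSD-style getloadavg(3) that os::loadavg wraps:

#include <cstdio>
#include <cstdlib>  // getloadavg(3) on glibc/BSD-style systems (assumption)

int main() {
  // Fetch the 1-, 5- and 15-minute load averages, then print them on one line.
  double loadavg[3] = {-1.0, -1.0, -1.0};
  if (getloadavg(loadavg, 3) == -1) {
    std::puts("load average: not available");
    return 1;
  }
  std::printf("load average: %0.02f %0.02f %0.02f\n",
              loadavg[0], loadavg[1], loadavg[2]);
  return 0;
}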
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 #endif
 
 static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
-  return syscall(__NR_get_mempolicy, mode, nmask, maxnode, addr, flags);
+  return syscall(SYS_get_mempolicy, mode, nmask, maxnode, addr, flags);
 }
 
 void ZNUMA::initialize_platform() {
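The change above swaps the raw __NR_get_mempolicy number for the conventional SYS_get_mempolicy spelling from <sys/syscall.h>. A minimal sketch of the same raw-syscall pattern, assuming Linux/x86_64; SYS_getcpu and its fallback number are only an example, not part of this commit:

#include <sys/syscall.h>  // SYS_* syscall numbers
#include <unistd.h>       // syscall(2)
#include <cstdio>

// On headers too old to define the constant, fall back to a literal
// number, as the ZGC files below do for SYS_fallocate/SYS_memfd_create.
// The literal is x86_64-specific (assumption).
#ifndef SYS_getcpu
#define SYS_getcpu 309
#endif

int main() {
  unsigned cpu = 0, node = 0;
  // Raw syscall invocation; returns -1 and sets errno on failure.
  if (syscall(SYS_getcpu, &cpu, &node, nullptr) == 0) {
    std::printf("running on cpu %u, numa node %u\n", cpu, node);
  }
  return 0;
}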
@@ -201,7 +201,7 @@ void os::init_system_properties_values() {
   char *home_path;
   char *dll_path;
   char *pslash;
-  char *bin = "\\bin";
+  const char *bin = "\\bin";
   char home_dir[MAX_PATH + 1];
   char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 
@@ -2185,7 +2185,7 @@ extern "C" void events();
 
 #define def_excpt(val) { #val, (val) }
 
-static const struct { char* name; uint number; } exceptlabels[] = {
+static const struct { const char* name; uint number; } exceptlabels[] = {
     def_excpt(EXCEPTION_ACCESS_VIOLATION),
     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
     def_excpt(EXCEPTION_BREAKPOINT),
@@ -5335,7 +5335,7 @@ int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
   DWORD exit_code;
 
   char * cmd_string;
-  char * cmd_prefix = "cmd /C ";
+  const char * cmd_prefix = "cmd /C ";
   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
   if (cmd_string == NULL) {
@@ -5674,8 +5674,8 @@ void TestReserveMemorySpecial_test() {
 */
 int os::get_signal_number(const char* name) {
   static const struct {
-    char* name;
-    int number;
+    const char* name;
+    int number;
   } siglabels [] =
     // derived from version 6.0 VC98/include/signal.h
   {"ABRT", SIGABRT, // abnormal termination triggered by abort cl
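The three os_windows.cpp fixes above all add const where a string literal is bound to a pointer: binding a literal to plain char* was deprecated in C++03 and is ill-formed in C++11, which newer compilers enforce. A tiny illustration (the table values are placeholders, not the real Windows exception codes):

// String literals have type "array of const char".
const char* cmd_prefix = "cmd /C ";   // OK
// char* bad_prefix = "cmd /C ";      // ill-formed since C++11

// The same applies to aggregate members, hence the exceptlabels/siglabels
// struct fields gaining const above:
static const struct { const char* name; int number; } labels[] = {
  {"ABRT", 22},  // value illustrative only
};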
@@ -26,8 +26,10 @@
 #include "gc/z/zBackingFile_linux_x86.hpp"
 #include "gc/z/zBackingPath_linux_x86.hpp"
 #include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
 #include "gc/z/zLargePages.inline.hpp"
 #include "logging/log.hpp"
+#include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
@@ -36,9 +38,54 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/statfs.h>
+#include <sys/syscall.h>
 #include <sys/types.h>
 #include <unistd.h>
 
+//
+// Support for building on older Linux systems
+//
+
+// System calls
+#ifndef SYS_fallocate
+#define SYS_fallocate                    285
+#endif
+#ifndef SYS_memfd_create
+#define SYS_memfd_create                 319
+#endif
+
+// memfd_create(2) flags
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+
+// open(2) flags
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// fallocate(2) flags
+#ifndef FALLOC_FL_KEEP_SIZE
+#define FALLOC_FL_KEEP_SIZE              0x01
+#endif
+#ifndef FALLOC_FL_PUNCH_HOLE
+#define FALLOC_FL_PUNCH_HOLE             0x02
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
+#endif
+
 // Filesystem names
 #define ZFILESYSTEM_TMPFS                "tmpfs"
 #define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
@@ -49,31 +96,6 @@
 // Java heap filename
 #define ZFILENAME_HEAP                   "java_heap"
 
-// Support for building on older Linux systems
-#ifndef __NR_memfd_create
-#define __NR_memfd_create                319
-#endif
-#ifndef MFD_CLOEXEC
-#define MFD_CLOEXEC                      0x0001U
-#endif
-#ifndef MFD_HUGETLB
-#define MFD_HUGETLB                      0x0004U
-#endif
-#ifndef O_CLOEXEC
-#define O_CLOEXEC                        02000000
-#endif
-#ifndef O_TMPFILE
-#define O_TMPFILE                        (020000000 | O_DIRECTORY)
-#endif
-
-// Filesystem types, see statfs(2)
-#ifndef TMPFS_MAGIC
-#define TMPFS_MAGIC                      0x01021994
-#endif
-#ifndef HUGETLBFS_MAGIC
-#define HUGETLBFS_MAGIC                  0x958458f6
-#endif
-
 // Preferred tmpfs mount points, ordered by priority
 static const char* z_preferred_tmpfs_mountpoints[] = {
   "/dev/shm",
@@ -88,15 +110,22 @@ static const char* z_preferred_hugetlbfs_mountpoints[] = {
   NULL
 };
 
-static int z_memfd_create(const char *name, unsigned int flags) {
-  return syscall(__NR_memfd_create, name, flags);
+static int z_fallocate_hugetlbfs_attempts = 3;
+static bool z_fallocate_supported = true;
+
+static int z_fallocate(int fd, int mode, size_t offset, size_t length) {
+  return syscall(SYS_fallocate, fd, mode, offset, length);
 }
 
-bool ZBackingFile::_hugetlbfs_mmap_retry = true;
+static int z_memfd_create(const char *name, unsigned int flags) {
+  return syscall(SYS_memfd_create, name, flags);
+}
 
 ZBackingFile::ZBackingFile() :
     _fd(-1),
+    _size(0),
     _filesystem(0),
+    _block_size(0),
     _available(0),
     _initialized(false) {
 
@@ -107,46 +136,53 @@ ZBackingFile::ZBackingFile() :
   }
 
   // Get filesystem statistics
-  struct statfs statfs_buf;
-  if (fstatfs(_fd, &statfs_buf) == -1) {
+  struct statfs buf;
+  if (fstatfs(_fd, &buf) == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)",
-                        err.to_string());
+    log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
     return;
   }
 
-  _filesystem = statfs_buf.f_type;
-  _available = statfs_buf.f_bavail * statfs_buf.f_bsize;
+  _filesystem = buf.f_type;
+  _block_size = buf.f_bsize;
+  _available = buf.f_bavail * _block_size;
 
   // Make sure we're on a supported filesystem
   if (!is_tmpfs() && !is_hugetlbfs()) {
-    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem",
-                        ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("Backing file must be located on a %s or a %s filesystem",
+                  ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
     return;
   }
 
   // Make sure the filesystem type matches requested large page type
   if (ZLargePages::is_transparent() && !is_tmpfs()) {
-    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
-                        ZFILESYSTEM_TMPFS);
+    log_error(gc)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
+                  ZFILESYSTEM_TMPFS);
     return;
   }
 
   if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
-    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
-                        ZFILESYSTEM_TMPFS);
+    log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
+                  ZFILESYSTEM_TMPFS);
     return;
   }
 
   if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
-    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem",
-                        ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
+                  "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
     return;
   }
 
   if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
-    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem",
-                        ZFILESYSTEM_HUGETLBFS);
+    log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
+                  ZFILESYSTEM_HUGETLBFS);
     return;
   }
 
+  const size_t expected_block_size = is_tmpfs() ? os::vm_page_size() : os::large_page_size();
+  if (expected_block_size != _block_size) {
+    log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
+                  is_tmpfs() ? ZFILESYSTEM_TMPFS : ZFILESYSTEM_HUGETLBFS, _block_size, expected_block_size);
+    return;
+  }
+
|
||||
if (fd == -1) {
|
||||
ZErrno err;
|
||||
log_debug(gc, init)("Failed to create memfd file (%s)",
|
||||
((UseLargePages && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
|
||||
((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -185,7 +221,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
   // Find mountpoint
   ZBackingPath path(filesystem, preferred_mountpoints);
   if (path.get() == NULL) {
-    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    log_error(gc)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
     return -1;
   }
 
@@ -201,7 +237,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
   struct stat stat_buf;
   if (fstat(fd_anon, &stat_buf) == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+    log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
     return -1;
   }
 
@@ -220,14 +256,14 @@ int ZBackingFile::create_file_fd(const char* name) const {
   const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
   if (fd == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
+    log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
     return -1;
   }
 
   // Unlink file
   if (unlink(filename) == -1) {
     ZErrno err;
-    log_error(gc, init)("Failed to unlink file %s (%s)", filename, err.to_string());
+    log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
     return -1;
   }
 
@@ -262,6 +298,10 @@ int ZBackingFile::fd() const {
   return _fd;
 }
 
+size_t ZBackingFile::size() const {
+  return _size;
+}
+
 size_t ZBackingFile::available() const {
   return _available;
 }
@@ -280,147 +320,271 @@ bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
   return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
 }
 
-bool ZBackingFile::try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
-  // Try first smaller part.
-  const size_t offset0 = offset;
-  const size_t length0 = align_up(length / 2, alignment);
-  if (!try_expand_tmpfs(offset0, length0, alignment)) {
-    return false;
+ZErrno ZBackingFile::fallocate_compat_ftruncate(size_t size) const {
+  while (ftruncate(_fd, size) == -1) {
+    if (errno != EINTR) {
+      // Failed
+      return errno;
+    }
   }
 
-  // Try second smaller part.
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
+  // On hugetlbfs, mapping a file segment will fail immediately, without
+  // the need to touch the mapped pages first, if there aren't enough huge
+  // pages available to back the mapping.
+  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+  if (addr == MAP_FAILED) {
+    // Failed
+    return errno;
+  }
+
+  // Once mapped, the huge pages are only reserved. We need to touch them
+  // to associate them with the file segment. Note that we can not punch
+  // hole in file segments which only have reserved pages.
+  if (touch) {
+    char* const start = (char*)addr;
+    char* const end = start + length;
+    os::pretouch_memory(start, end, _block_size);
+  }
+
+  // Unmap again. From now on, the huge pages that were mapped are allocated
+  // to this file. There's no risk in getting SIGBUS when touching them.
+  if (munmap(addr, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_compat_pwrite(size_t offset, size_t length) const {
+  uint8_t data = 0;
+
+  // Allocate backing memory by writing to each block
+  for (size_t pos = offset; pos < offset + length; pos += _block_size) {
+    if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
+      // Failed
+      return errno;
+    }
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_fill_hole_compat(size_t offset, size_t length) {
+  // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
+  // since Linux 4.3. When fallocate(2) is not supported we emulate it using
+  // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Increase file size
+    const ZErrno err = fallocate_compat_ftruncate(end);
+    if (err) {
+      // Failed
+      return err;
+    }
+  }
+
+  // Allocate backing memory
+  const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
+                                    : fallocate_compat_pwrite(offset, length);
+  if (err) {
+    if (end > _size) {
+      // Restore file size
+      fallocate_compat_ftruncate(_size);
+    }
+
+    // Failed
+    return err;
+  }
+
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+  const int mode = 0; // Allocate
+  const int res = z_fallocate(_fd, mode, offset, length);
+  if (res == -1) {
+    // Failed
+    return errno;
+  }
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::fallocate_fill_hole(size_t offset, size_t length) {
+  // Using compat mode is more efficient when allocating space on hugetlbfs.
+  // Note that allocating huge pages this way will only reserve them, and not
+  // associate them with segments of the file. We must guarantee that we at
+  // some point touch these segments, otherwise we can not punch hole in them.
+  if (z_fallocate_supported && !is_hugetlbfs()) {
+    const ZErrno err = fallocate_fill_hole_syscall(offset, length);
+    if (!err) {
+      // Success
+      return 0;
+    }
+
+    if (err != ENOSYS && err != EOPNOTSUPP) {
+      // Failed
+      return err;
+    }
+
+    // Not supported
+    log_debug(gc)("Falling back to fallocate() compatibility mode");
+    z_fallocate_supported = false;
+  }
+
+  return fallocate_fill_hole_compat(offset, length);
+}
+
+ZErrno ZBackingFile::fallocate_punch_hole(size_t offset, size_t length) {
+  if (is_hugetlbfs()) {
+    // We can only punch hole in pages that have been touched. Non-touched
+    // pages are only reserved, and not associated with any specific file
+    // segment. We don't know which pages have been previously touched, so
+    // we always touch them here to guarantee that we can punch hole.
+    const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
+    if (err) {
+      // Failed
+      return err;
+    }
+  }
+
+  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
+  if (z_fallocate(_fd, mode, offset, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZBackingFile::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+  // Try first half
+  const size_t offset0 = offset;
+  const size_t length0 = align_up(length / 2, _block_size);
+  const ZErrno err0 = fallocate(punch_hole, offset0, length0);
+  if (err0) {
+    return err0;
+  }
+
+  // Try second half
   const size_t offset1 = offset0 + length0;
   const size_t length1 = length - length0;
-  if (!try_expand_tmpfs(offset1, length1, alignment)) {
-    return false;
+  const ZErrno err1 = fallocate(punch_hole, offset1, length1);
+  if (err1) {
+    return err1;
   }
 
-  return true;
+  // Success
+  return 0;
 }
 
-bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const {
-  assert(length > 0, "Invalid length");
-  assert(is_aligned(length, alignment), "Invalid length");
+ZErrno ZBackingFile::fallocate(bool punch_hole, size_t offset, size_t length) {
+  assert(is_aligned(offset, _block_size), "Invalid offset");
+  assert(is_aligned(length, _block_size), "Invalid length");
 
-  ZErrno err = posix_fallocate(_fd, offset, length);
-
-  if (err == EINTR && length > alignment) {
-    // Calling posix_fallocate() with a large length can take a long
-    // time to complete. When running profilers, such as VTune, this
-    // syscall will be constantly interrupted by signals. Expanding
-    // the file in smaller steps avoids this problem.
-    return try_split_and_expand_tmpfs(offset, length, alignment);
+  const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
+  if (err == EINTR && length > _block_size) {
+    // Calling fallocate(2) with a large length can take a long time to
+    // complete. When running profilers, such as VTune, this syscall will
+    // be constantly interrupted by signals. Expanding the file in smaller
+    // steps avoids this problem.
+    return split_and_fallocate(punch_hole, offset, length);
   }
 
   return err;
 }
 
+bool ZBackingFile::commit_inner(size_t offset, size_t length) {
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+retry:
+  const ZErrno err = fallocate(false /* punch_hole */, offset, length);
   if (err) {
-    log_error(gc)("Failed to allocate backing file (%s)", err.to_string());
+    if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
+      // If we fail to allocate during initialization, due to lack of space on
+      // the hugetlbfs filesystem, then we wait and retry a few times before
+      // giving up. Otherwise there is a risk that running JVMs back-to-back
+      // will fail, since there is a delay between process termination and the
+      // huge pages owned by that process being returned to the huge page pool
+      // and made available for new allocations.
+      log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
+
+      // Wait and retry in one second, in the hope that huge pages will be
+      // available by then.
+      sleep(1);
+      goto retry;
+    }
+
+    // Failed
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
     return false;
   }
 
+  // Success
   return true;
 }
 
-bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length) const {
-  assert(is_tmpfs(), "Wrong filesystem");
-  return try_expand_tmpfs(offset, length, os::vm_page_size());
-}
-
-bool ZBackingFile::try_expand_hugetlbfs(size_t offset, size_t length) const {
-  assert(is_hugetlbfs(), "Wrong filesystem");
-
-  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
-  // Instead of posix_fallocate() we can use a well-known workaround,
-  // which involves truncating the file to requested size and then try
-  // to map it to verify that there are enough huge pages available to
-  // back it.
-  while (ftruncate(_fd, offset + length) == -1) {
-    ZErrno err;
-    if (err != EINTR) {
-      log_error(gc)("Failed to truncate backing file (%s)", err.to_string());
-      return false;
-    }
-  }
+size_t ZBackingFile::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
+  }
 
-  // If we fail mapping during initialization, i.e. when we are pre-mapping
-  // the heap, then we wait and retry a few times before giving up. Otherwise
-  // there is a risk that running JVMs back-to-back will fail, since there
-  // is a delay between process termination and the huge pages owned by that
-  // process being returned to the huge page pool and made available for new
-  // allocations.
-  void* addr = MAP_FAILED;
-  const int max_attempts = 5;
-  for (int attempt = 1; attempt <= max_attempts; attempt++) {
-    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
-    if (addr != MAP_FAILED || !_hugetlbfs_mmap_retry) {
-      // Mapping was successful or mmap retry is disabled
-      break;
-    }
-
-    ZErrno err;
-    log_debug(gc)("Failed to map backing file (%s), attempt %d of %d",
-                  err.to_string(), attempt, max_attempts);
-
-    // Wait and retry in one second, in the hope that
-    // huge pages will be available by then.
-    sleep(1);
-  }
-
-  // Disable mmap retry from now on
-  if (_hugetlbfs_mmap_retry) {
-    _hugetlbfs_mmap_retry = false;
-  }
-
-  if (addr == MAP_FAILED) {
-    // Not enough huge pages left
-    ZErrno err;
-    log_error(gc)("Failed to map backing file (%s)", err.to_string());
-    return false;
-  }
-
-  // Successful mapping, unmap again. From now on the pages we mapped
-  // will be reserved for this file.
-  if (munmap(addr, length) == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to unmap backing file (%s)", err.to_string());
-    return false;
-  }
-
-  return true;
-}
-
-bool ZBackingFile::try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const {
-  assert(is_aligned(offset, alignment), "Invalid offset");
-  assert(is_aligned(length, alignment), "Invalid length");
-
-  log_debug(gc)("Expanding heap from " SIZE_FORMAT "M to " SIZE_FORMAT "M", offset / M, (offset + length) / M);
-
-  return is_hugetlbfs() ? try_expand_hugetlbfs(offset, length) : try_expand_tmpfs(offset, length);
-}
-
-size_t ZBackingFile::try_expand(size_t offset, size_t length, size_t alignment) const {
+  // Failed, try to commit as much as possible
   size_t start = offset;
   size_t end = offset + length;
 
-  // Try to expand
-  if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
-    // Success
-    return end;
-  }
-
-  // Failed, try to expand as much as possible
   for (;;) {
-    length = align_down((end - start) / 2, alignment);
-    if (length < alignment) {
-      // Done, don't expand more
-      return start;
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length < ZGranuleSize) {
+      // Done, don't commit more
+      return start - offset;
    }
 
-    if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
-      // Success, try expand more
+    if (commit_inner(start, length)) {
+      // Success, try commit more
       start += length;
     } else {
-      // Failed, try expand less
+      // Failed, try commit less
      end -= length;
    }
  }
 }
+
+size_t ZBackingFile::uncommit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const ZErrno err = fallocate(true /* punch_hole */, offset, length);
+  if (err) {
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
+}
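One pattern worth calling out in the new code: fallocate() halves the request and recurses via split_and_fallocate() when it keeps returning EINTR, since profilers such as VTune interrupt long syscalls with signals. A minimal sketch of that strategy follows; AllocFn is a hypothetical stand-in for z_fallocate, not part of the commit.

#include <cerrno>
#include <cstddef>

// Hypothetical allocator callback; returns 0 on success or an errno value.
using AllocFn = int (*)(size_t offset, size_t length);

// Split-on-EINTR: if a large request keeps getting interrupted, halve it
// (keeping block alignment) and issue two smaller requests instead.
int alloc_with_split(AllocFn fn, size_t offset, size_t length, size_t block_size) {
  const int err = fn(offset, length);
  if (err != EINTR || length <= block_size) {
    return err;
  }
  // Round the first half up to a block boundary, like align_up().
  const size_t half = ((length / 2) + block_size - 1) / block_size * block_size;
  if (const int err0 = alloc_with_split(fn, offset, half, block_size)) {
    return err0;
  }
  return alloc_with_split(fn, offset + half, length - half, block_size);
}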
@@ -26,12 +26,14 @@
 
 #include "memory/allocation.hpp"
 
+class ZErrno;
+
 class ZBackingFile {
 private:
-  static bool _hugetlbfs_mmap_retry;
-
   int      _fd;
+  size_t   _size;
   uint64_t _filesystem;
+  size_t   _block_size;
   size_t   _available;
   bool     _initialized;
 
@@ -43,11 +45,17 @@ private:
   bool is_hugetlbfs() const;
   bool tmpfs_supports_transparent_huge_pages() const;
 
-  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
-  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
-  bool try_expand_tmpfs(size_t offset, size_t length) const;
-  bool try_expand_hugetlbfs(size_t offset, size_t length) const;
-  bool try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const;
+  ZErrno fallocate_compat_ftruncate(size_t size) const;
+  ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
+  ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole(size_t offset, size_t length);
+  ZErrno fallocate_punch_hole(size_t offset, size_t length);
+  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
+  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+
+  bool commit_inner(size_t offset, size_t length);
 
 public:
   ZBackingFile();
@@ -55,9 +63,11 @@ public:
   bool is_initialized() const;
 
   int fd() const;
+  size_t size() const;
   size_t available() const;
 
-  size_t try_expand(size_t offset, size_t length, size_t alignment) const;
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
 };
 
 #endif // OS_CPU_LINUX_X86_GC_Z_ZBACKINGFILE_LINUX_X86_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@ void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountp
   FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
   if (fd == NULL) {
     ZErrno err;
-    log_error(gc, init)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
     return;
   }
 
@@ -113,10 +113,10 @@ char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
   }
 
   // Preferred mount point not found
-  log_error(gc, init)("More than one %s filesystem found:", filesystem);
+  log_error(gc)("More than one %s filesystem found:", filesystem);
   ZArrayIterator<char*> iter2(mountpoints);
   for (char* mountpoint; iter2.next(&mountpoint);) {
-    log_error(gc, init)("  %s", mountpoint);
+    log_error(gc)("  %s", mountpoint);
   }
 
   return NULL;
@@ -130,7 +130,7 @@ char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferr
 
   if (mountpoints.size() == 0) {
     // No mount point found
-    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
+    log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
   } else if (mountpoints.size() == 1) {
     // One mount point found
     path = strdup(mountpoints.at(0));
@@ -32,6 +32,7 @@
 #include "gc/z/zPhysicalMemory.inline.hpp"
 #include "gc/z/zPhysicalMemoryBacking_linux_x86.hpp"
 #include "logging/log.hpp"
+#include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
@@ -40,7 +41,11 @@
 #include <sys/mman.h>
 #include <sys/types.h>
 
+//
 // Support for building on older Linux systems
+//
+
+// madvise(2) flags
 #ifndef MADV_HUGEPAGE
 #define MADV_HUGEPAGE                        14
 #endif
@@ -48,22 +53,37 @@
 // Proc file entry for max map mount
 #define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
 
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
-    _manager(),
-    _file() {
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
 
-  if (!_file.is_initialized()) {
+void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
+  // Note that the available space on a tmpfs or a hugetlbfs filesystem
+  // will be zero if no size limit was specified when it was mounted.
+  const size_t available = _file.available();
+  if (available == 0) {
+    // No size limit set, skip check
+    log_info(gc, init)("Available space on backing filesystem: N/A");
     return;
   }
 
-  // Check and warn if max map count is too low
-  check_max_map_count(max_capacity);
+  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", available / M);
 
-  // Check and warn if available space on filesystem is too low
-  check_available_space_on_filesystem(max_capacity);
+  // Warn if the filesystem doesn't currently have enough space available to hold
+  // the max heap size. The max heap size will be capped if we later hit this limit
+  // when trying to expand the heap.
+  if (available < max) {
+    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+    log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
+    log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
+                    "(available", max / M);
+    log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
+                    "size could", available / M);
+    log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
+  }
 }
 
-void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
+void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
   const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
   FILE* const file = fopen(filename, "r");
   if (file == NULL) {
@@ -86,62 +106,101 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
   // However, ZGC tends to create the most mappings and dominate the total count.
   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
-  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
+  const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
   if (actual_max_map_count < required_max_map_count) {
-    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
-    log_warning(gc, init)("The system limit on number of memory mappings per process might be too low "
-                          "for the given");
-    log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
-                          max_capacity / M, filename);
-    log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing "
-                          "execution with the current", required_max_map_count, actual_max_map_count);
-    log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory.");
+    log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+    log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given");
+    log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
+                    max / M, filename);
+    log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
+                    "with the current", required_max_map_count, actual_max_map_count);
+    log_warning(gc)("limit could lead to a fatal error, due to failure to map memory.");
   }
 }
 
-void ZPhysicalMemoryBacking::check_available_space_on_filesystem(size_t max_capacity) const {
-  // Note that the available space on a tmpfs or a hugetlbfs filesystem
-  // will be zero if no size limit was specified when it was mounted.
-  const size_t available = _file.available();
-  if (available == 0) {
-    // No size limit set, skip check
-    log_info(gc, init)("Available space on backing filesystem: N/A");
-    return;
-  }
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+  // Warn if available space is too low
+  warn_available_space(max);
 
-  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M",
-                     available / M);
-
-  // Warn if the filesystem doesn't currently have enough space available to hold
-  // the max heap size. The max heap size will be capped if we later hit this limit
-  // when trying to expand the heap.
-  if (available < max_capacity) {
-    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
-    log_warning(gc, init)("Not enough space available on the backing filesystem to hold the current "
-                          "max Java heap");
-    log_warning(gc, init)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem "
-                          "accordingly (available", max_capacity / M);
-    log_warning(gc, init)("space is currently " SIZE_FORMAT "M). Continuing execution with the current "
-                          "filesystem size could", available / M);
-    log_warning(gc, init)("lead to a premature OutOfMemoryError being thrown, due to failure to map "
-                          "memory.");
-  }
+  // Warn if max map count is too low
+  warn_max_map_count(max);
 }
 
-bool ZPhysicalMemoryBacking::is_initialized() const {
-  return _file.is_initialized();
+bool ZPhysicalMemoryBacking::supports_uncommit() {
+  assert(!is_init_completed(), "Invalid state");
+  assert(_file.size() >= ZGranuleSize, "Invalid size");
+
+  // Test if uncommit is supported by uncommitting and then re-committing a granule
+  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
 }
 
-size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
-  assert(old_capacity < new_capacity, "Invalid old/new capacity");
+size_t ZPhysicalMemoryBacking::commit(size_t size) {
+  size_t committed = 0;
 
-  const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, ZGranuleSize);
-  if (capacity > old_capacity) {
-    // Add expanded capacity to free list
-    _manager.free(old_capacity, capacity - old_capacity);
+  // Fill holes in the backing file
+  while (committed < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - committed;
+    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+    if (start == UINTPTR_MAX) {
+      // No holes to commit
+      break;
+    }
+
+    // Try commit hole
+    const size_t filled = _file.commit(start, allocated);
+    if (filled > 0) {
+      // Successful or partialy successful
+      _committed.free(start, filled);
+      committed += filled;
+    }
+    if (filled < allocated) {
+      // Failed or partialy failed
+      _uncommitted.free(start + filled, allocated - filled);
+      return committed;
+    }
   }
 
-  return capacity;
+  // Expand backing file
+  if (committed < size) {
+    const size_t remaining = size - committed;
+    const uintptr_t start = _file.size();
+    const size_t expanded = _file.commit(start, remaining);
+    if (expanded > 0) {
+      // Successful or partialy successful
+      _committed.free(start, expanded);
+      committed += expanded;
+    }
+  }
+
+  return committed;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
+  size_t uncommitted = 0;
+
+  // Punch holes in backing file
+  while (uncommitted < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - uncommitted;
+    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+    // Try punch hole
+    const size_t punched = _file.uncommit(start, allocated);
+    if (punched > 0) {
+      // Successful or partialy successful
+      _uncommitted.free(start, punched);
+      uncommitted += punched;
+    }
+    if (punched < allocated) {
+      // Failed or partialy failed
+      _committed.free(start + punched, allocated - punched);
+      return uncommitted;
+    }
+  }
+
+  return uncommitted;
+}
 
 ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
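For the max_map_count heuristic above, a quick worked computation shows the scale of the warning threshold; the 16G heap is an example value, and the 2M granule matches ZGranuleSize on x86_64:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  const size_t ZGranuleSize = 2 * M;          // ZGC granule
  const size_t max_capacity = 16 * 1024 * M;  // 16G heap (example value)

  // Each granule may be mapped once per heap view (3 views), plus 20%
  // slack for non-ZGC mappings: (16G / 2M) * 3 * 1.2 = 8192 * 3.6.
  const size_t required = (max_capacity / ZGranuleSize) * 3 * 1.2;
  std::printf("required max_map_count: %zu\n", required);  // prints 29491
  return 0;
}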
@@ -151,7 +210,7 @@ ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
 
   // Allocate segments
   for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _manager.alloc_from_front(ZGranuleSize);
+    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
     assert(start != UINTPTR_MAX, "Allocation should never fail");
     pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
   }
@@ -159,13 +218,13 @@ ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
   return pmem;
 }
 
-void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
   const size_t nsegments = pmem.nsegments();
 
   // Free segments
   for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment segment = pmem.segment(i);
-    _manager.free(segment.start(), segment.size());
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _committed.free(segment.start(), segment.size());
   }
 }
 
@@ -178,10 +237,10 @@ void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
   }
 }
 
-void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
-  if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size, int advice) const {
+  if (madvise((void*)addr, size, advice) == -1) {
     ZErrno err;
-    log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+    log_error(gc)("Failed to advise on memory (advice %d, %s)", advice, err.to_string());
   }
 }
 
@ -190,41 +249,42 @@ void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}

void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
const size_t nsegments = pmem.nsegments();
size_t size = 0;

// Map segments
for (size_t i = 0; i < nsegments; i++) {
const ZPhysicalMemorySegment segment = pmem.segment(i);
const size_t size = segment.size();
const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
const ZPhysicalMemorySegment& segment = pmem.segment(i);
const uintptr_t segment_addr = addr + size;
const void* const res = mmap((void*)segment_addr, segment.size(), PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
}

// Advise on use of transparent huge pages before touching it
if (ZLargePages::is_transparent()) {
advise_view(addr, size);
}
size += segment.size();
}

// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);
// Advise on use of transparent huge pages before touching it
if (ZLargePages::is_transparent()) {
advise_view(addr, size, MADV_HUGEPAGE);
}

if (pretouch) {
pretouch_view(addr, size);
}
// NUMA interleave memory before touching it
ZNUMA::memory_interleave(addr, size);

addr += size;
// Pre-touch memory
if (pretouch) {
pretouch_view(addr, size);
}
}

void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
// Note that we must keep the address space reservation intact and just detach
// the backing memory. For this reason we map a new anonymous, non-accessible
// and non-reserved page over the mapping instead of actually unmapping.
const size_t size = pmem.size();
const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
const void* const res = mmap((void*)addr, pmem.size(), PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if (res == MAP_FAILED) {
ZErrno err;
map_failed(err);
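The comment in unmap_view states the key invariant: the address space reservation must survive, so the backing is detached by overmapping rather than unmapped. A minimal sketch of that idiom, with illustrative addr/size parameters:

    #include <sys/mman.h>

    // Detach whatever backs [addr, addr + size) while keeping the range
    // reserved: replace it with an inaccessible anonymous mapping that
    // commits no memory (PROT_NONE + MAP_NORESERVE) instead of munmap'ing,
    // so no unrelated mapping can land in the hole.
    static bool detach_backing(void* addr, size_t size) {
      void* const res = mmap(addr, size, PROT_NONE,
                             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                             -1, 0);
      return res != MAP_FAILED;
    }
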
@ -232,11 +292,11 @@ void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) co
}

uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
// From an NMT point of view we treat the first heap mapping (marked0) as committed
// From an NMT point of view we treat the first heap view (marked0) as committed
return ZAddress::marked0(offset);
}

void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
@ -248,7 +308,7 @@ void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
}
}

void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
if (ZVerifyViews) {
// Unmap good view
unmap_view(pmem, ZAddress::good(offset));
@ -260,13 +320,13 @@ void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const
}
}

void ZPhysicalMemoryBacking::debug_map(ZPhysicalMemory pmem, uintptr_t offset) const {
void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset), false /* pretouch */);
}

void ZPhysicalMemoryBacking::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
unmap_view(pmem, ZAddress::good(offset));

@ -32,35 +32,39 @@ class ZPhysicalMemory;

class ZPhysicalMemoryBacking {
private:
ZMemoryManager _manager;
ZBackingFile _file;
ZMemoryManager _committed;
ZMemoryManager _uncommitted;

void warn_available_space(size_t max) const;
void warn_max_map_count(size_t max) const;

void check_max_map_count(size_t max_capacity) const;
void check_available_space_on_filesystem(size_t max_capacity) const;
void map_failed(ZErrno err) const;

void advise_view(uintptr_t addr, size_t size) const;
void advise_view(uintptr_t addr, size_t size, int advice) const;
void pretouch_view(uintptr_t addr, size_t size) const;
void map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const;
void unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const;
void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;

public:
ZPhysicalMemoryBacking(size_t max_capacity);

bool is_initialized() const;

size_t try_expand(size_t old_capacity, size_t new_capacity);
void warn_commit_limits(size_t max) const;
bool supports_uncommit();

size_t commit(size_t size);
size_t uncommit(size_t size);

ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);
void free(const ZPhysicalMemory& pmem);

uintptr_t nmt_address(uintptr_t offset) const;

void map(ZPhysicalMemory pmem, uintptr_t offset) const;
void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;

void debug_map(ZPhysicalMemory pmem, uintptr_t offset) const;
void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
};

#endif // OS_CPU_LINUX_X86_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP

@ -278,7 +278,7 @@ void AOTCompiledMethod::metadata_do(MetadataClosure* f) {
}
}
} else if (iter.type() == relocInfo::static_call_type ||
iter.type() == relocInfo::opt_virtual_call_type){
iter.type() == relocInfo::opt_virtual_call_type) {
// Check Method* in AOT c2i stub for other calls.
Metadata* meta = (Metadata*)nativeLoadGot_at(nativePltCall_at(iter.addr())->plt_c2i_stub())->data();
if (meta != NULL) {

@ -405,8 +405,7 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
// This is a name from a signature. Strip off the trimmings.
// Call recursive to keep scope of strippedsym.
TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-2,
KILL_COMPILE_ON_FATAL_(_unloaded_ciinstance_klass));
sym->utf8_length()-2);
ciSymbol* strippedname = get_symbol(strippedsym);
return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
}
@ -459,8 +458,7 @@ ciKlass* ciEnv::get_klass_by_name_impl(ciKlass* accessing_klass,
// We have an unloaded array.
// Build it on the fly if the element class exists.
TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-1,
KILL_COMPILE_ON_FATAL_(fail_type));
sym->utf8_length()-1);

// Get element ciKlass recursively.
ciKlass* elem_klass =

@ -336,7 +336,7 @@ class CompileReplay : public StackObj {
Symbol* parse_symbol(TRAPS) {
const char* str = parse_escaped_string();
if (str != NULL) {
Symbol* sym = SymbolTable::lookup(str, (int)strlen(str), CHECK_NULL);
Symbol* sym = SymbolTable::new_symbol(str);
return sym;
}
return NULL;
@ -345,7 +345,7 @@ class CompileReplay : public StackObj {
// Parse a valid klass name and look it up
Klass* parse_klass(TRAPS) {
const char* str = parse_escaped_string();
Symbol* klass_name = SymbolTable::lookup(str, (int)strlen(str), CHECK_NULL);
Symbol* klass_name = SymbolTable::new_symbol(str);
if (klass_name != NULL) {
Klass* k = NULL;
if (_iklass != NULL) {
@ -371,7 +371,7 @@ class CompileReplay : public StackObj {

// Lookup a klass
Klass* resolve_klass(const char* klass, TRAPS) {
Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL);
Symbol* klass_name = SymbolTable::new_symbol(klass);
return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD);
}

@ -800,8 +800,8 @@ class CompileReplay : public StackObj {
const char* field_name = parse_escaped_string();
const char* field_signature = parse_string();
fieldDescriptor fd;
Symbol* name = SymbolTable::lookup(field_name, (int)strlen(field_name), CHECK);
Symbol* sig = SymbolTable::lookup(field_signature, (int)strlen(field_signature), CHECK);
Symbol* name = SymbolTable::new_symbol(field_name);
Symbol* sig = SymbolTable::new_symbol(field_signature);
if (!k->find_local_field(name, sig, &fd) ||
!fd.is_static() ||
fd.has_initial_value()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,16 +58,9 @@ ciSignature::ciSignature(ciKlass* accessing_klass, const constantPoolHandle& cpo
if (!ss.is_object()) {
type = ciType::make(ss.type());
} else {
Symbol* name = ss.as_symbol(THREAD);
if (HAS_PENDING_EXCEPTION) {
type = ss.is_array() ? (ciType*)ciEnv::unloaded_ciobjarrayklass()
: (ciType*)ciEnv::unloaded_ciinstance_klass();
env->record_out_of_memory_failure();
CLEAR_PENDING_EXCEPTION;
} else {
ciSymbol* klass_name = env->get_symbol(name);
type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
}
Symbol* name = ss.as_symbol();
ciSymbol* klass_name = env->get_symbol(name);
type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
}
_types->append(type);
if (ss.at_return_type()) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -132,12 +132,7 @@ const char* ciSymbol::as_klass_external_name() const {
// Make a ciSymbol from a C string (implementation).
ciSymbol* ciSymbol::make_impl(const char* s) {
EXCEPTION_CONTEXT;
TempNewSymbol sym = SymbolTable::new_symbol(s, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
CURRENT_THREAD_ENV->record_out_of_memory_failure();
return ciEnv::_unloaded_cisymbol;
}
TempNewSymbol sym = SymbolTable::new_symbol(s);
return CURRENT_THREAD_ENV->get_symbol(sym);
}

@ -335,8 +335,7 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s
names,
lengths,
indices,
hashValues,
CHECK);
hashValues);
names_count = 0;
}
} else {
@ -373,8 +372,7 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s
names,
lengths,
indices,
hashValues,
CHECK);
hashValues);
}

// Copy _current pointer of local copy back to stream.
@ -823,7 +821,7 @@ void ClassFileParser::patch_constant_pool(ConstantPool* cp,
guarantee_property(java_lang_String::is_instance(patch()),
"Illegal class patch at %d in class file %s",
index, CHECK);
Symbol* const name = java_lang_String::as_symbol(patch(), CHECK);
Symbol* const name = java_lang_String::as_symbol(patch());
patch_class(cp, index, NULL, name);
}
break;
@ -5723,7 +5721,7 @@ void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anon
// The new class name is created with a refcount of one. When installed into the InstanceKlass,
// it'll be two and when the ClassFileParser destructor runs, it'll go back to one and get deleted
// when the class is unloaded.
_class_name = SymbolTable::new_symbol(new_anon_name, symbol_len, CHECK);
_class_name = SymbolTable::new_symbol(new_anon_name, symbol_len);
}
}

@ -326,8 +326,7 @@ InstanceKlass* ClassListParser::load_class_from_source(Symbol* class_name, TRAPS
}

Klass* ClassListParser::load_current_class(TRAPS) {
TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name);

Klass *klass = NULL;
if (!is_loading_from_source()) {

@ -244,7 +244,7 @@ PackageEntry* ClassLoader::get_package_entry(const char* class_name, ClassLoader
return NULL;
}
PackageEntryTable* pkgEntryTable = loader_data->packages();
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(pkg_name, CHECK_NULL);
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(pkg_name);
return pkgEntryTable->lookup_only(pkg_symbol);
}

@ -646,7 +646,7 @@ void ClassLoader::setup_patch_mod_entries() {

for (int i = 0; i < num_of_entries; i++) {
const char* module_name = (patch_mod_args->at(i))->module_name();
Symbol* const module_sym = SymbolTable::lookup(module_name, (int)strlen(module_name), CHECK);
Symbol* const module_sym = SymbolTable::new_symbol(module_name);
assert(module_sym != NULL, "Failed to obtain Symbol for module name");
ModuleClassPathList* module_cpl = new ModuleClassPathList(module_sym);

@ -1126,7 +1126,7 @@ bool ClassLoader::add_package(const char *fullq_class_name, s2 classpath_index,
const char *cp = package_from_name(fullq_class_name);
if (cp != NULL) {
PackageEntryTable* pkg_entry_tbl = ClassLoaderData::the_null_class_loader_data()->packages();
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(cp, CHECK_false);
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(cp);
PackageEntry* pkg_entry = pkg_entry_tbl->lookup_only(pkg_symbol);
if (pkg_entry != NULL) {
assert(classpath_index != -1, "Unexpected classpath_index");
@ -1141,7 +1141,7 @@ bool ClassLoader::add_package(const char *fullq_class_name, s2 classpath_index,
oop ClassLoader::get_system_package(const char* name, TRAPS) {
// Look up the name in the boot loader's package entry table.
if (name != NULL) {
TempNewSymbol package_sym = SymbolTable::new_symbol(name, (int)strlen(name), CHECK_NULL);
TempNewSymbol package_sym = SymbolTable::new_symbol(name);
// Look for the package entry in the boot loader's package entry table.
PackageEntry* package =
ClassLoaderData::the_null_class_loader_data()->packages()->lookup_only(package_sym);

@ -108,8 +108,7 @@ void ClassLoaderData::initialize_name(Handle class_loader) {
const char* cl_instance_name = java_lang_String::as_utf8_string(cl_name);

if (cl_instance_name != NULL && cl_instance_name[0] != '\0') {
// Can't throw InternalError and SymbolTable doesn't throw OOM anymore.
_name = SymbolTable::new_symbol(cl_instance_name, CATCH);
_name = SymbolTable::new_symbol(cl_instance_name);
}
}

@ -125,8 +124,7 @@ void ClassLoaderData::initialize_name(Handle class_loader) {
(cl_name_and_id == NULL) ? _class_loader_klass->external_name() :
java_lang_String::as_utf8_string(cl_name_and_id);
assert(cl_instance_name_and_id != NULL && cl_instance_name_and_id[0] != '\0', "class loader has no name and id");
// Can't throw InternalError and SymbolTable doesn't throw OOM anymore.
_name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id, CATCH);
_name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id);
}

ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :

@ -300,6 +300,10 @@ class ClassLoaderData : public CHeapObj<mtClass> {
ModuleEntryTable* modules();
bool modules_defined() { return (_modules != NULL); }

// Offsets
static ByteSize holder_offset() { return in_ByteSize(offset_of(ClassLoaderData, _holder)); }
static ByteSize keep_alive_offset() { return in_ByteSize(offset_of(ClassLoaderData, _keep_alive)); }

// Loaded class dictionary
Dictionary* dictionary() const { return _dictionary; }

@ -459,7 +459,7 @@ class MethodFamily : public ResourceObj {
};

Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
return SymbolTable::new_symbol("No qualifying defaults found", THREAD);
return SymbolTable::new_symbol("No qualifying defaults found");
}

Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const {
@ -472,7 +472,7 @@ Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method
ss.write((const char*)name->bytes(), name->utf8_length());
ss.write((const char*)signature->bytes(), signature->utf8_length());
ss.print(" is abstract");
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
return SymbolTable::new_symbol(ss.base(), (int)ss.size());
}

Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
@ -487,7 +487,7 @@ Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods
ss.print(".");
ss.write((const char*)name->bytes(), name->utf8_length());
}
return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
return SymbolTable::new_symbol(ss.base(), (int)ss.size());
}

@ -569,19 +569,19 @@ char* java_lang_String::as_quoted_ascii(oop java_string) {
return result;
}

Symbol* java_lang_String::as_symbol(oop java_string, TRAPS) {
Symbol* java_lang_String::as_symbol(oop java_string) {
typeArrayOop value = java_lang_String::value(java_string);
int length = java_lang_String::length(java_string, value);
bool is_latin1 = java_lang_String::is_latin1(java_string);
if (!is_latin1) {
jchar* base = (length == 0) ? NULL : value->char_at_addr(0);
Symbol* sym = SymbolTable::lookup_unicode(base, length, THREAD);
Symbol* sym = SymbolTable::new_symbol(base, length);
return sym;
} else {
ResourceMark rm;
jbyte* position = (length == 0) ? NULL : value->byte_at_addr(0);
const char* base = UNICODE::as_utf8(position, length);
Symbol* sym = SymbolTable::lookup(base, length, THREAD);
Symbol* sym = SymbolTable::new_symbol(base, length);
return sym;
}
}
@ -1443,7 +1443,7 @@ void java_lang_Class::print_signature(oop java_class, outputStream* st) {
if (is_instance) st->print(";");
}

Symbol* java_lang_Class::as_signature(oop java_class, bool intern_if_not_found, TRAPS) {
Symbol* java_lang_Class::as_signature(oop java_class, bool intern_if_not_found) {
assert(java_lang_Class::is_instance(java_class), "must be a Class object");
Symbol* name;
if (is_primitive(java_class)) {
@ -1464,7 +1464,7 @@ Symbol* java_lang_Class::as_signature(oop java_class, bool intern_if_not_found,
if (!intern_if_not_found) {
name = SymbolTable::probe(sigstr, siglen);
} else {
name = SymbolTable::new_symbol(sigstr, siglen, THREAD);
name = SymbolTable::new_symbol(sigstr, siglen);
}
}
}
@ -1895,7 +1895,7 @@ Symbol* java_lang_Throwable::detail_message(oop throwable) {
PRESERVE_EXCEPTION_MARK; // Keep original exception
oop detailed_message = java_lang_Throwable::message(throwable);
if (detailed_message != NULL) {
return java_lang_String::as_symbol(detailed_message, THREAD);
return java_lang_String::as_symbol(detailed_message);
}
return NULL;
}
@ -3703,7 +3703,7 @@ void java_lang_invoke_MethodType::print_signature(oop mt, outputStream* st) {
java_lang_Class::print_signature(rtype(mt), st);
}

Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAPS) {
Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool intern_if_not_found) {
ResourceMark rm;
stringStream buffer(128);
print_signature(mt, &buffer);
@ -3713,7 +3713,7 @@ Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool intern_if_not_fou
if (!intern_if_not_found) {
name = SymbolTable::probe(sigstr, siglen);
} else {
name = SymbolTable::new_symbol(sigstr, siglen, THREAD);
name = SymbolTable::new_symbol(sigstr, siglen);
}
return name;
}
@ -4375,11 +4375,11 @@ void JavaClasses::serialize_offsets(SerializeClosure* soc) {
bool JavaClasses::check_offset(const char *klass_name, int hardcoded_offset, const char *field_name, const char* field_sig) {
EXCEPTION_MARK;
fieldDescriptor fd;
TempNewSymbol klass_sym = SymbolTable::new_symbol(klass_name, CATCH);
TempNewSymbol klass_sym = SymbolTable::new_symbol(klass_name);
Klass* k = SystemDictionary::resolve_or_fail(klass_sym, true, CATCH);
InstanceKlass* ik = InstanceKlass::cast(k);
TempNewSymbol f_name = SymbolTable::new_symbol(field_name, CATCH);
TempNewSymbol f_sig = SymbolTable::new_symbol(field_sig, CATCH);
TempNewSymbol f_name = SymbolTable::new_symbol(field_name);
TempNewSymbol f_sig = SymbolTable::new_symbol(field_sig);
if (!ik->find_local_field(f_name, f_sig, &fd)) {
tty->print_cr("Nonstatic field %s.%s not found", klass_name, field_name);
return false;

@ -205,7 +205,7 @@ class java_lang_String : AllStatic {
static Handle internalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '.', '/', THREAD); }

// Conversion
static Symbol* as_symbol(oop java_string, TRAPS);
static Symbol* as_symbol(oop java_string);
static Symbol* as_symbol_or_null(oop java_string);

// Testers
@ -291,7 +291,7 @@ class java_lang_Class : AllStatic {
static Klass* as_Klass_raw(oop java_class);
static void set_klass(oop java_class, Klass* klass);
static BasicType as_BasicType(oop java_class, Klass** reference_klass = NULL);
static Symbol* as_signature(oop java_class, bool intern_if_not_found, TRAPS);
static Symbol* as_signature(oop java_class, bool intern_if_not_found);
static void print_signature(oop java_class, outputStream *st);
static const char* as_external_name(oop java_class);
// Testing
@ -1166,7 +1166,7 @@ class java_lang_invoke_MethodType: AllStatic {
static int ptype_slot_count(oop mt); // extra counts for long/double
static int rtype_slot_count(oop mt); // extra counts for long/double

static Symbol* as_signature(oop mt, bool intern_if_not_found, TRAPS);
static Symbol* as_signature(oop mt, bool intern_if_not_found);
static void print_signature(oop mt, outputStream* st);

static bool is_instance(oop obj);

@ -111,7 +111,7 @@ static ModuleEntry* get_module_entry(jobject module, TRAPS) {
static PackageEntry* get_package_entry(ModuleEntry* module_entry, const char* package_name, TRAPS) {
ResourceMark rm(THREAD);
if (package_name == NULL) return NULL;
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name, CHECK_NULL);
TempNewSymbol pkg_symbol = SymbolTable::new_symbol(package_name);
PackageEntryTable* package_entry_table = module_entry->loader_data()->packages();
assert(package_entry_table != NULL, "Unexpected null package entry table");
return package_entry_table->lookup_only(pkg_symbol);
@ -148,7 +148,7 @@ static void define_javabase_module(jobject module, jstring version,
const char* module_version = get_module_version(version);
TempNewSymbol version_symbol;
if (module_version != NULL) {
version_symbol = SymbolTable::new_symbol(module_version, CHECK);
version_symbol = SymbolTable::new_symbol(module_version);
} else {
version_symbol = NULL;
}
@ -160,7 +160,7 @@ static void define_javabase_module(jobject module, jstring version,
module_location =
java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(location));
if (module_location != NULL) {
location_symbol = SymbolTable::new_symbol(module_location, CHECK);
location_symbol = SymbolTable::new_symbol(module_location);
}
}

@ -173,7 +173,7 @@ static void define_javabase_module(jobject module, jstring version,
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
err_msg("Invalid package name: %s for module: " JAVA_BASE_NAME, package_name));
}
Symbol* pkg_symbol = SymbolTable::new_symbol(package_name, CHECK);
Symbol* pkg_symbol = SymbolTable::new_symbol(package_name);
pkg_list->append(pkg_symbol);
}

@ -345,7 +345,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), message);
}

Symbol* pkg_symbol = SymbolTable::new_symbol(package_name, CHECK);
Symbol* pkg_symbol = SymbolTable::new_symbol(package_name);
pkg_list->append(pkg_symbol);
}

@ -353,14 +353,14 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
assert(module_table != NULL, "module entry table shouldn't be null");

// Create symbol* entry for module name.
TempNewSymbol module_symbol = SymbolTable::new_symbol(module_name, CHECK);
TempNewSymbol module_symbol = SymbolTable::new_symbol(module_name);

bool dupl_modules = false;

// Create symbol* entry for module version.
TempNewSymbol version_symbol;
if (module_version != NULL) {
version_symbol = SymbolTable::new_symbol(module_version, CHECK);
version_symbol = SymbolTable::new_symbol(module_version);
} else {
version_symbol = NULL;
}
@ -372,7 +372,7 @@ void Modules::define_module(jobject module, jboolean is_open, jstring version,
module_location =
java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(location));
if (module_location != NULL) {
location_symbol = SymbolTable::new_symbol(module_location, CHECK);
location_symbol = SymbolTable::new_symbol(module_location);
}
}

@ -657,7 +657,7 @@ jobject Modules::get_named_module(Handle h_loader, const char* package_name, TRA
if (strlen(package_name) == 0) {
return NULL;
}
TempNewSymbol package_sym = SymbolTable::new_symbol(package_name, CHECK_NULL);
TempNewSymbol package_sym = SymbolTable::new_symbol(package_name);
const PackageEntry* const pkg_entry =
get_package_entry_by_name(package_sym, h_loader, THREAD);
const ModuleEntry* const module_entry = (pkg_entry != NULL ? pkg_entry->module() : NULL);

@ -92,8 +92,7 @@ VerificationType StackMapFrame::set_locals_from_arg(
// local num may be greater than size of parameters because long/double occupies two slots
while(!ss.at_return_type()) {
init_local_num += _verifier->change_sig_to_verificationType(
&ss, &_locals[init_local_num],
CHECK_VERIFY_(verifier(), VerificationType::bogus_type()));
&ss, &_locals[init_local_num]);
ss.next();
}
_locals_size = init_local_num;
@ -102,13 +101,12 @@ VerificationType StackMapFrame::set_locals_from_arg(
case T_OBJECT:
case T_ARRAY:
{
Symbol* sig = ss.as_symbol(CHECK_(VerificationType::bogus_type()));
Symbol* sig = ss.as_symbol();
if (!sig->is_permanent()) {
// Create another symbol to save as signature stream unreferences
// this symbol.
Symbol *sig_copy =
verifier()->create_temporary_symbol(sig, 0, sig->utf8_length(),
CHECK_(VerificationType::bogus_type()));
verifier()->create_temporary_symbol(sig, 0, sig->utf8_length());
assert(sig_copy == sig, "symbols don't match");
sig = sig_copy;
}

@ -195,7 +195,7 @@ void SymbolTable::trigger_cleanup() {
Service_lock->notify_all();
}

Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRAPS) {
Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap) {
assert (len <= Symbol::max_length(), "should be checked by caller");

Symbol* sym;
@ -204,12 +204,12 @@ Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRA
}
if (c_heap) {
// refcount starts as 1
sym = new (len, THREAD) Symbol((const u1*)name, len, 1);
sym = new (len) Symbol((const u1*)name, len, 1);
assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
} else {
// Allocate to global arena
MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
sym = new (len, arena(), THREAD) Symbol((const u1*)name, len, PERM_REFCOUNT);
sym = new (len, arena()) Symbol((const u1*)name, len, PERM_REFCOUNT);
}
return sym;
}
@ -317,25 +317,26 @@ Symbol* SymbolTable::lookup_common(const char* name,
return sym;
}

Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
Symbol* SymbolTable::new_symbol(const char* name, int len) {
unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
if (sym == NULL) {
sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, CHECK_NULL);
sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true);
}
assert(sym->refcount() != 0, "lookup should have incremented the count");
assert(sym->equals(name, len), "symbol must be properly initialized");
return sym;
}

Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
Symbol* SymbolTable::new_symbol(const Symbol* sym, int begin, int end) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
assert(sym->refcount() != 0, "require a valid symbol");
const char* name = (const char*)sym->base() + begin;
int len = end - begin;
unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
Symbol* found = SymbolTable::the_table()->lookup_common(name, len, hash);
if (found == NULL) {
found = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, THREAD);
found = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true);
}
return found;
}

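new_symbol() is the usual intern pattern: hash once, try the lookup, and only on a miss fall through to an insert. A deliberately simplified sketch of that lookup-or-insert shape; the real table is a lock-free ConcurrentHashTable holding reference-counted Symbols, here a mutex and std::string stand in:

    #include <mutex>
    #include <string>
    #include <unordered_map>

    class InternTable {
      std::mutex _lock;
      std::unordered_map<std::string, int> _refcounts;
    public:
      // Returns a stable pointer to the interned string; both the hit and
      // the miss path bump the count, mirroring how lookup and insert above
      // each hand out a counted reference.
      const std::string* intern(const char* name, size_t len) {
        std::lock_guard<std::mutex> guard(_lock);
        auto it = _refcounts.emplace(std::string(name, len), 0).first;
        it->second++;
        return &it->first;
      }
    };
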
@ -347,8 +348,8 @@ private:
int _len;
const char* _str;
public:
SymbolTableLookup(Thread* thread, const char* key, int len, uintx hash)
: _thread(thread), _hash(hash), _len(len), _str(key) {}
SymbolTableLookup(const char* key, int len, uintx hash)
: _hash(hash), _len(len), _str(key) {}
uintx get_hash() const {
return _hash;
}
@ -388,7 +389,7 @@ public:

Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) {
Thread* thread = Thread::current();
SymbolTableLookup lookup(thread, name, len, hash);
SymbolTableLookup lookup(name, len, hash);
SymbolTableGet stg;
bool rehash_warning = false;
_local_table->get(thread, lookup, stg, &rehash_warning);
@ -406,23 +407,23 @@ Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash)
// Suggestion: Push unicode-based lookup all the way into the hashing
// and probing logic, so there is no need for convert_to_utf8 until
// an actual new Symbol* is created.
Symbol* SymbolTable::lookup_unicode(const jchar* name, int utf16_length, TRAPS) {
Symbol* SymbolTable::new_symbol(const jchar* name, int utf16_length) {
int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
char stack_buf[ON_STACK_BUFFER_LENGTH];
if (utf8_length < (int) sizeof(stack_buf)) {
char* chars = stack_buf;
UNICODE::convert_to_utf8(name, utf16_length, chars);
return lookup(chars, utf8_length, THREAD);
return new_symbol(chars, utf8_length);
} else {
ResourceMark rm(THREAD);
ResourceMark rm;
char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
UNICODE::convert_to_utf8(name, utf16_length, chars);
return lookup(chars, utf8_length, THREAD);
return new_symbol(chars, utf8_length);
}
}

Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,
unsigned int& hash) {
unsigned int& hash) {
int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
char stack_buf[ON_STACK_BUFFER_LENGTH];
if (utf8_length < (int) sizeof(stack_buf)) {
@ -439,32 +440,33 @@ Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,

void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp,
int names_count, const char** names, int* lengths,
int* cp_indices, unsigned int* hashValues, TRAPS) {
int* cp_indices, unsigned int* hashValues) {
bool c_heap = !loader_data->is_the_null_class_loader_data();
for (int i = 0; i < names_count; i++) {
const char *name = names[i];
int len = lengths[i];
unsigned int hash = hashValues[i];
assert(SymbolTable::the_table()->lookup_shared(name, len, hash) == NULL, "must have checked already");
Symbol* sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
Symbol* sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap);
assert(sym->refcount() != 0, "lookup should have incremented the count");
cp->symbol_at_put(cp_indices[i], sym);
}
}

Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS) {
SymbolTableLookup lookup(THREAD, name, len, hash);
Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap) {
SymbolTableLookup lookup(name, len, hash);
SymbolTableGet stg;
bool clean_hint = false;
bool rehash_warning = false;
Symbol* sym = NULL;
Thread* THREAD = Thread::current();

do {
if (_local_table->get(THREAD, lookup, stg, &rehash_warning)) {
sym = stg.get_res_sym();
break;
}
sym = SymbolTable::the_table()->allocate_symbol(name, len, heap, THREAD);
sym = SymbolTable::the_table()->allocate_symbol(name, len, heap);
if (_local_table->insert(THREAD, lookup, sym, &rehash_warning, &clean_hint)) {
break;
}
@ -481,12 +483,12 @@ Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, boo
return sym;
}

Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
Symbol* SymbolTable::new_permanent_symbol(const char* name) {
unsigned int hash = 0;
int len = (int)strlen(name);
Symbol* sym = SymbolTable::lookup_only(name, len, hash);
if (sym == NULL) {
sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, false, CHECK_NULL);
sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, false);
}
if (!sym->is_permanent()) {
sym->make_permanent();

@ -43,7 +43,7 @@ class JavaThread;
// it becomes "managed" by TempNewSymbol instances. As a handle class, TempNewSymbol
// needs to maintain proper reference counting in context of copy semantics.
//
// In SymbolTable, new_symbol() and lookup() will create a Symbol* if not already in the
// In SymbolTable, new_symbol() will create a Symbol* if not already in the
// symbol table and add to the symbol's reference count.
// probe() and lookup_only() will increment the refcount if symbol is found.
class TempNewSymbol : public StackObj {
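The comment above is the whole contract of the handle: it owns exactly one reference and must keep refcounts balanced across copy construction and assignment. A simplified, self-contained model of that contract (the Symbol below is a stand-in, not the real class):

    #include <atomic>

    struct Symbol {                        // stand-in for HotSpot's Symbol
      std::atomic<int> refcount{1};        // born with one reference
      void increment_refcount() { refcount.fetch_add(1); }
      void decrement_refcount() { refcount.fetch_sub(1); }
    };

    class SymbolHandle {                   // models TempNewSymbol's counting
      Symbol* _sym;
    public:
      SymbolHandle(Symbol* s = nullptr) : _sym(s) {}  // steals the caller's reference
      SymbolHandle(const SymbolHandle& rhs) : _sym(rhs._sym) {
        if (_sym != nullptr) _sym->increment_refcount();  // a copy owns its own reference
      }
      SymbolHandle& operator=(const SymbolHandle& rhs) {
        if (this != &rhs) {
          if (rhs._sym != nullptr) rhs._sym->increment_refcount();
          if (_sym != nullptr) _sym->decrement_refcount();
          _sym = rhs._sym;
        }
        return *this;
      }
      ~SymbolHandle() { if (_sym != nullptr) _sym->decrement_refcount(); }
      Symbol* operator->() const { return _sym; }
    };
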
@ -139,16 +139,19 @@ private:

SymbolTable();

Symbol* allocate_symbol(const char* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
Symbol* allocate_symbol(const char* name, int len, bool c_heap); // Assumes no characters larger than 0x7F
Symbol* do_lookup(const char* name, int len, uintx hash);
Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS);
Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap);

// lookup only, won't add. Also calculate hash. Used by the ClassfileParser.
static Symbol* lookup_only(const char* name, int len, unsigned int& hash);
static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);

// Adding elements
static void new_symbols(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** name, int* lengths,
int* cp_indices, unsigned int* hashValues,
TRAPS);
int* cp_indices, unsigned int* hashValues);

static Symbol* lookup_shared(const char* name, int len, unsigned int hash);
Symbol* lookup_dynamic(const char* name, int len, unsigned int hash);
@ -190,14 +193,6 @@ public:
static void trigger_cleanup();

// Probing
static Symbol* lookup(const char* name, int len, TRAPS);
// lookup only, won't add. Also calculate hash.
static Symbol* lookup_only(const char* name, int len, unsigned int& hash);
// adds new symbol if not found
static Symbol* lookup(const Symbol* sym, int begin, int end, TRAPS);
// jchar (UTF16) version of lookups
static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
// Needed for preloading classes in signatures when compiling.
// Returns the symbol is already present in symbol table, otherwise
// NULL. NO ALLOCATION IS GUARANTEED!
@ -210,20 +205,18 @@ public:
return lookup_only_unicode(name, len, ignore_hash);
}

// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
assert(utf8_buffer != NULL, "just checking");
return lookup(utf8_buffer, length, THREAD);
}
static Symbol* new_symbol(const char* name, TRAPS) {
return new_symbol(name, (int)strlen(name), THREAD);
}
static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
return lookup(sym, begin, end, THREAD);
// Symbol lookup and create if not found.
// jchar (UTF16) version of lookup
static Symbol* new_symbol(const jchar* name, int len);
// char (UTF8) versions
static Symbol* new_symbol(const Symbol* sym, int begin, int end);
static Symbol* new_symbol(const char* utf8_buffer, int length);
static Symbol* new_symbol(const char* name) {
return new_symbol(name, (int)strlen(name));
}

// Create a symbol in the arena for symbols that are not deleted
static Symbol* new_permanent_symbol(const char* name, TRAPS);
static Symbol* new_permanent_symbol(const char* name);

// Rehash the string table if it gets out of balance
static void rehash_table();
@ -245,7 +238,6 @@ public:
static void dump(outputStream* st, bool verbose=false);
// Debugging
static void verify();
static void read(const char* filename, TRAPS);

// Histogram
static void print_histogram() PRODUCT_RETURN;

@ -263,7 +263,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null_helper(Symbol* c
ResourceMark rm(THREAD);
// Ignore wrapping L and ;.
TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
class_name->utf8_length() - 2, CHECK_NULL);
class_name->utf8_length() - 2);
return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD);
} else {
return resolve_instance_class_or_null(class_name, class_loader, protection_domain, THREAD);
@ -2356,7 +2356,7 @@ Symbol* SystemDictionary::check_signature_loaders(Symbol* signature,
SignatureStream sig_strm(signature, is_method);
while (!sig_strm.is_done()) {
if (sig_strm.is_object()) {
Symbol* sig = sig_strm.as_symbol(CHECK_NULL);
Symbol* sig = sig_strm.as_symbol();
if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
return sig;
}
@ -2626,7 +2626,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
mirror = ss.as_java_mirror(class_loader, protection_domain,
SignatureStream::NCDFError, CHECK_(empty));
}
assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
assert(mirror != NULL, "%s", ss.as_symbol()->as_C_string());
if (ss.at_return_type())
rt = Handle(THREAD, mirror);
else

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,13 +129,11 @@ VerificationType VerificationType::get_component(ClassVerifier *context, TRAPS)
case 'D': return VerificationType(Double);
case '[':
component = context->create_temporary_symbol(
name(), 1, name()->utf8_length(),
CHECK_(VerificationType::bogus_type()));
name(), 1, name()->utf8_length());
return VerificationType::reference_type(component);
case 'L':
component = context->create_temporary_symbol(
name(), 2, name()->utf8_length() - 1,
CHECK_(VerificationType::bogus_type()));
name(), 2, name()->utf8_length() - 1);
return VerificationType::reference_type(component);
default:
// Met an invalid type signature, e.g. [X

@ -595,9 +595,9 @@ VerificationType ClassVerifier::object_type() const {
return VerificationType::reference_type(vmSymbols::java_lang_Object());
}

TypeOrigin ClassVerifier::ref_ctx(const char* sig, TRAPS) {
TypeOrigin ClassVerifier::ref_ctx(const char* sig) {
VerificationType vt = VerificationType::reference_type(
create_temporary_symbol(sig, (int)strlen(sig), THREAD));
create_temporary_symbol(sig, (int)strlen(sig)));
return TypeOrigin::implicit(vt);
}

@ -647,7 +647,7 @@ void ClassVerifier::translate_signature(Symbol* const method_sig,

// Translate the signature arguments into verification types.
while (!sig_stream.at_return_type()) {
int n = change_sig_to_verificationType(&sig_stream, sig_type, CHECK_VERIFY(this));
int n = change_sig_to_verificationType(&sig_stream, sig_type);
assert(n <= 2, "Unexpected signature type");

// Store verification type(s). Longs and Doubles each have two verificationTypes.
@ -664,7 +664,7 @@ void ClassVerifier::translate_signature(Symbol* const method_sig,

// Store verification type(s) for the return type, if there is one.
if (sig_stream.type() != T_VOID) {
int n = change_sig_to_verificationType(&sig_stream, sig_type, CHECK_VERIFY(this));
int n = change_sig_to_verificationType(&sig_stream, sig_type);
assert(n <= 2, "Unexpected signature return type");
for (int y = 0; y < n; y++) {
verif_types->push(sig_type[y]);
@ -925,7 +925,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_int_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[I", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[I")),
bad_type_msg, "iaload");
return;
}
@ -953,7 +953,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_char_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[C", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[C")),
bad_type_msg, "caload");
return;
}
@ -967,7 +967,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_short_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[S", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[S")),
bad_type_msg, "saload");
return;
}
@ -981,7 +981,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_long_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[J", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[J")),
bad_type_msg, "laload");
return;
}
@ -996,7 +996,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_float_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[F", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[F")),
bad_type_msg, "faload");
return;
}
@ -1010,7 +1010,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_double_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[D", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[D")),
bad_type_msg, "daload");
return;
}
@ -1099,7 +1099,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_int_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[I", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[I")),
bad_type_msg, "iastore");
return;
}
@ -1127,7 +1127,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_char_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[C", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[C")),
bad_type_msg, "castore");
return;
}
@ -1141,7 +1141,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_short_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[S", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[S")),
bad_type_msg, "sastore");
return;
}
@ -1156,7 +1156,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_long_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[J", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[J")),
bad_type_msg, "lastore");
return;
}
@ -1170,7 +1170,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_float_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[F", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[F")),
bad_type_msg, "fastore");
return;
}
@ -1185,7 +1185,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType::reference_check(), CHECK_VERIFY(this));
if (!atype.is_double_array()) {
verify_error(ErrorContext::bad_type(bci,
current_frame.stack_top_ctx(), ref_ctx("[D", THREAD)),
current_frame.stack_top_ctx(), ref_ctx("[D")),
bad_type_msg, "dastore");
return;
}
@ -2183,8 +2183,7 @@ void ClassVerifier::verify_ldc(
uintptr_t constant_type_buffer[2];
VerificationType* v_constant_type = (VerificationType*)constant_type_buffer;
SignatureStream sig_stream(constant_type, false);
int n = change_sig_to_verificationType(
&sig_stream, v_constant_type, CHECK_VERIFY(this));
int n = change_sig_to_verificationType(&sig_stream, v_constant_type);
int opcode_n = (opcode == Bytecodes::_ldc2_w ? 2 : 1);
if (n != opcode_n) {
// wrong kind of ldc; reverify against updated type mask
@ -2325,8 +2324,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,

SignatureStream sig_stream(field_sig, false);
VerificationType stack_object_type;
int n = change_sig_to_verificationType(
&sig_stream, field_type, CHECK_VERIFY(this));
int n = change_sig_to_verificationType(&sig_stream, field_type);
u2 bci = bcs->bci();
bool is_assignable;
switch (bcs->raw_code()) {
@ -3002,8 +3000,7 @@ VerificationType ClassVerifier::get_newarray_type(
}

// from_bt[index] contains the array signature which has a length of 2
Symbol* sig = create_temporary_symbol(
from_bt[index], 2, CHECK_(VerificationType::bogus_type()));
Symbol* sig = create_temporary_symbol(from_bt[index], 2);
return VerificationType::reference_type(sig);
}

@ -3041,8 +3038,7 @@ void ClassVerifier::verify_anewarray(
int n = os::snprintf(arr_sig_str, length + 1, "[L%s;", component_name);
assert(n == length, "Unexpected number of characters in string");
}
Symbol* arr_sig = create_temporary_symbol(
arr_sig_str, length, CHECK_VERIFY(this));
Symbol* arr_sig = create_temporary_symbol(arr_sig_str, length);
VerificationType new_array_type = VerificationType::reference_type(arr_sig);
current_frame->push_stack(new_array_type, CHECK_VERIFY(this));
}
@ -3150,18 +3146,18 @@ void ClassVerifier::verify_return_value(
// These are stored in the verifier until the end of verification so that
// they can be reference counted.
Symbol* ClassVerifier::create_temporary_symbol(const Symbol *s, int begin,
int end, TRAPS) {
int end) {
const char* name = (const char*)s->base() + begin;
int length = end - begin;
return create_temporary_symbol(name, length, CHECK_NULL);
return create_temporary_symbol(name, length);
}

Symbol* ClassVerifier::create_temporary_symbol(const char *name, int length, TRAPS) {
Symbol* ClassVerifier::create_temporary_symbol(const char *name, int length) {
// Quick deduplication check
if (_previous_symbol != NULL && _previous_symbol->equals(name, length)) {
return _previous_symbol;
}
Symbol* sym = SymbolTable::new_symbol(name, length, CHECK_NULL);
Symbol* sym = SymbolTable::new_symbol(name, length);
if (!sym->is_permanent()) {
if (_symbols == NULL) {
_symbols = new GrowableArray<Symbol*>(50, 0, NULL);

|
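create_temporary_symbol keeps the last symbol it handed out as a one-entry cache, which pays off because signature parsing tends to ask for the same name repeatedly. A self-contained sketch of that memoization shape (names illustrative, std::string standing in for Symbol):

    #include <cstring>
    #include <deque>
    #include <string>

    class TemporarySymbols {
      std::deque<std::string> _all;            // kept alive until verification ends
      const std::string* _previous = nullptr;  // last symbol handed out
    public:
      const std::string& create(const char* name, size_t length) {
        // Quick deduplication check, as in the verifier above.
        if (_previous != nullptr && _previous->size() == length &&
            std::memcmp(_previous->data(), name, length) == 0) {
          return *_previous;
        }
        _all.emplace_back(name, length);
        _previous = &_all.back();              // deque keeps element addresses stable
        return *_previous;
      }
    };
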
@ -446,7 +446,7 @@ class ClassVerifier : public StackObj {
}

int change_sig_to_verificationType(
SignatureStream* sig_type, VerificationType* inference_type, TRAPS);
SignatureStream* sig_type, VerificationType* inference_type);

VerificationType cp_index_to_type(int index, const constantPoolHandle& cp, TRAPS) {
return VerificationType::reference_type(cp->klass_name_at(index));
@ -456,8 +456,8 @@ class ClassVerifier : public StackObj {
// their reference counts need to be decremented when the verifier object
// goes out of scope. Since these symbols escape the scope in which they're
// created, we can't use a TempNewSymbol.
Symbol* create_temporary_symbol(const Symbol* s, int begin, int end, TRAPS);
Symbol* create_temporary_symbol(const char *s, int length, TRAPS);
Symbol* create_temporary_symbol(const Symbol* s, int begin, int end);
Symbol* create_temporary_symbol(const char *s, int length);
Symbol* create_temporary_symbol(Symbol* s) {
if (s == _previous_symbol) {
return s;
@ -473,18 +473,18 @@ class ClassVerifier : public StackObj {
return s;
}

TypeOrigin ref_ctx(const char* str, TRAPS);
TypeOrigin ref_ctx(const char* str);

};

inline int ClassVerifier::change_sig_to_verificationType(
SignatureStream* sig_type, VerificationType* inference_type, TRAPS) {
SignatureStream* sig_type, VerificationType* inference_type) {
BasicType bt = sig_type->type();
switch (bt) {
case T_OBJECT:
case T_ARRAY:
{
Symbol* name = sig_type->as_symbol(CHECK_0);
Symbol* name = sig_type->as_symbol();
// Create another symbol to save as signature stream unreferences this symbol.
Symbol* name_copy = create_temporary_symbol(name);
assert(name_copy == name, "symbols don't match");

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ void vmSymbols::initialize(TRAPS) {
if (!UseSharedSpaces) {
const char* string = &vm_symbol_bodies[0];
for (int index = (int)FIRST_SID; index < (int)SID_LIMIT; index++) {
Symbol* sym = SymbolTable::new_permanent_symbol(string, CHECK);
Symbol* sym = SymbolTable::new_permanent_symbol(string);
_symbols[index] = sym;
string += strlen(string); // skip string body
string += 1; // skip trailing null
@ -141,7 +141,7 @@ void vmSymbols::initialize(TRAPS) {
// Spot-check correspondence between strings, symbols, and enums:
assert(_symbols[NO_SID] == NULL, "must be");
const char* str = "java/lang/Object";
TempNewSymbol jlo = SymbolTable::new_permanent_symbol(str, CHECK);
TempNewSymbol jlo = SymbolTable::new_permanent_symbol(str);
assert(strncmp(str, (char*)jlo->base(), jlo->utf8_length()) == 0, "");
assert(jlo == java_lang_Object(), "");
SID sid = VM_SYMBOL_ENUM_NAME(java_lang_Object);
@ -160,7 +160,7 @@ void vmSymbols::initialize(TRAPS) {
// The string "format" happens (at the moment) not to be a vmSymbol,
// though it is a method name in java.lang.String.
str = "format";
TempNewSymbol fmt = SymbolTable::new_permanent_symbol(str, CHECK);
TempNewSymbol fmt = SymbolTable::new_permanent_symbol(str);
sid = find_sid(fmt);
assert(sid == NO_SID, "symbol index works (negative test)");
}


@ -468,39 +468,6 @@ bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  return ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
@ -549,9 +516,6 @@ bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
@ -581,6 +545,7 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while(iter.next()) {

    switch (iter.type()) {
@ -611,6 +576,45 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store((Method*)NULL, r->metadata_addr());

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
  }

@ -395,8 +395,6 @@ public:
private:
  bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);

  void clean_ic_stubs();

public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

@ -1555,7 +1555,7 @@ void nmethod::metadata_do(MetadataClosure* f) {
  // Visit all immediate references that are embedded in the instruction stream.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {
    if (iter.type() == relocInfo::metadata_type ) {
    if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* r = iter.metadata_reloc();
      // In this metadata, we must only follow those metadatas directly embedded in
      // the code. Other metadatas (oop_index>0) are seen as part of

@ -829,8 +829,8 @@ void CompilerOracle::parse_compile_only(char * line) {
  }

  EXCEPTION_MARK;
  Symbol* c_name = SymbolTable::new_symbol(className, CHECK);
  Symbol* m_name = SymbolTable::new_symbol(methodName, CHECK);
  Symbol* c_name = SymbolTable::new_symbol(className);
  Symbol* m_name = SymbolTable::new_symbol(methodName);
  Symbol* signature = NULL;

  BasicMatcher* bm = new BasicMatcher();

@ -300,10 +300,10 @@ void MethodMatcher::parse_method_pattern(char*& line, const char*& error_msg, Me
      }
      line += bytes_read;
    }
    signature = SymbolTable::new_symbol(sig, CHECK);
    signature = SymbolTable::new_symbol(sig);
  }
  Symbol* c_name = SymbolTable::new_symbol(class_name, CHECK);
  Symbol* m_name = SymbolTable::new_symbol(method_name, CHECK);
  Symbol* c_name = SymbolTable::new_symbol(class_name);
  Symbol* m_name = SymbolTable::new_symbol(method_name);

  matcher->init(c_name, c_match, m_name, m_match, signature);
  return;

@ -27,7 +27,7 @@

#include "gc/g1/heapRegion.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"

class G1CollectedHeap;

@ -249,14 +249,14 @@ public:
class G1GCAllocRegion : public G1AllocRegion {
protected:
  G1EvacStats* _stats;
  InCSetState::in_cset_state_t _purpose;
  G1HeapRegionAttr::region_type_t _purpose;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);

  virtual size_t retire(bool fill_up);

  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, InCSetState::in_cset_state_t purpose)
  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, G1HeapRegionAttr::region_type_t purpose)
    : G1AllocRegion(name, bot_updates), _stats(stats), _purpose(purpose) {
    assert(stats != NULL, "Must pass non-NULL PLAB statistics");
  }
@ -265,13 +265,13 @@ protected:
class SurvivorGCAllocRegion : public G1GCAllocRegion {
public:
  SurvivorGCAllocRegion(G1EvacStats* stats)
    : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, InCSetState::Young) { }
    : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young) { }
};

class OldGCAllocRegion : public G1GCAllocRegion {
public:
  OldGCAllocRegion(G1EvacStats* stats)
    : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, InCSetState::Old) { }
    : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, G1HeapRegionAttr::Old) { }

  // This specialization of release() makes sure that the last card that has
  // been allocated into has been completely filled by a dummy object. This

@ -39,8 +39,8 @@ G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _survivor_is_full(false),
  _old_is_full(false),
  _mutator_alloc_region(),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {
}

@ -161,7 +161,7 @@ size_t G1Allocator::used_in_alloc_regions() {
}


HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
@ -171,14 +171,14 @@ HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.value()) {
    case InCSetState::Young:
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case InCSetState::Old:
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
@ -246,22 +246,22 @@ uint G1PLABAllocator::calc_survivor_alignment_bytes() {
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (uint state = 0; state < InCSetState::Num; state++) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
@ -300,17 +300,17 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.value()] += word_sz;
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

void G1PLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < InCSetState::Num; state++) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
@ -323,7 +323,7 @@ void G1PLABAllocator::flush_and_retire_stats() {

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB * const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->waste();
@ -334,7 +334,7 @@ size_t G1PLABAllocator::waste() const {

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (uint state = 0; state < InCSetState::Num; state++) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB * const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->undo_waste();

@ -26,7 +26,7 @@
#define SHARE_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

@ -112,10 +112,10 @@ public:
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be a humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                   size_t word_size);

  HeapWord* par_allocate_during_gc(InCSetState dest,
  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                   size_t min_word_size,
                                   size_t desired_word_size,
                                   size_t* actual_word_size);
@ -132,7 +132,7 @@ private:

  PLAB _surviving_alloc_buffer;
  PLAB _tenured_alloc_buffer;
  PLAB* _alloc_buffers[InCSetState::Num];
  PLAB* _alloc_buffers[G1HeapRegionAttr::Num];

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
@ -142,10 +142,10 @@ private:
  const uint _survivor_alignment_bytes;

  // Number of words allocated directly (not counting PLAB allocation).
  size_t _direct_allocated[InCSetState::Num];
  size_t _direct_allocated[G1HeapRegionAttr::Num];

  void flush_and_retire_stats();
  inline PLAB* alloc_buffer(InCSetState dest);
  inline PLAB* alloc_buffer(G1HeapRegionAttr dest);

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
@ -162,20 +162,20 @@ public:
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful. Plab_refill_failed indicates whether an attempt to refill the
  // PLAB failed or not.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
  HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                        size_t word_sz,
                                        bool* plab_refill_failed);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  inline HeapWord* plab_allocate(InCSetState dest,
  inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
                                 size_t word_sz);

  inline HeapWord* allocate(InCSetState dest,
  inline HeapWord* allocate(G1HeapRegionAttr dest,
                            size_t word_sz,
                            bool* refill_failed);

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz);
  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz);
};

// G1ArchiveRegionMap is a boolean array used to mark G1 regions as

@ -63,15 +63,15 @@ inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
  return mutator_alloc_region()->attempt_allocation_force(word_size);
}

inline PLAB* G1PLABAllocator::alloc_buffer(InCSetState dest) {
inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest) {
  assert(dest.is_valid(),
         "Allocation buffer index out of bounds: " CSETSTATE_FORMAT, dest.value());
  assert(_alloc_buffers[dest.value()] != NULL,
         "Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
  return _alloc_buffers[dest.value()];
         "Allocation buffer index out of bounds: %s", dest.get_type_str());
  assert(_alloc_buffers[dest.type()] != NULL,
         "Allocation buffer is NULL: %s", dest.get_type_str());
  return _alloc_buffers[dest.type()];
}

inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
                                                size_t word_sz) {
  PLAB* buffer = alloc_buffer(dest);
  if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
@ -81,7 +81,7 @@ inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
  }
}

inline HeapWord* G1PLABAllocator::allocate(InCSetState dest,
inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
                                           size_t word_sz,
                                           bool* refill_failed) {
  HeapWord* const obj = plab_allocate(dest, word_sz);

@ -128,6 +128,11 @@ public:
    return biased_base()[biased_index];
  }

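  // Returns a pointer to the element at the given index, so callers can
  // update an existing entry in place; the index is bounds-checked via
  // verify_index() in debug builds.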
  T* get_ref_by_index(uintptr_t index) const {
    verify_index(index);
    return &this->base()[index];
  }

  // Return the index of the element of the given array that covers the given
  // word in the heap.
  idx_t get_index_by_address(HeapWord* value) const {

@ -1536,7 +1536,7 @@ G1CollectedHeap::G1CollectedHeap() :
  _ref_processor_cm(NULL),
  _is_alive_closure_cm(this),
  _is_subject_to_discovery_cm(this),
  _in_cset_fast_test() {
  _region_attr() {

  _verifier = new G1HeapVerifier(this);

@ -1772,7 +1772,7 @@ jint G1CollectedHeap::initialize() {
    HeapWord* end = _hrm->reserved().end();
    size_t granularity = HeapRegion::GrainBytes;

    _in_cset_fast_test.initialize(start, end, granularity);
    _region_attr.initialize(start, end, granularity);
    _humongous_reclaim_candidates.initialize(start, end, granularity);
  }

@ -2626,7 +2626,7 @@ bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const
         G1EagerReclaimHumongousObjects && rem_set->is_empty();
}

class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
private:
  size_t _total_humongous;
  size_t _candidate_humongous;
@ -2690,24 +2690,26 @@ class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
  }

public:
  RegisterHumongousWithInCSetFastTestClosure()
  RegisterRegionsWithRegionAttrTableClosure()
  : _total_humongous(0),
    _candidate_humongous(0),
    _dcq(&G1BarrierSet::dirty_card_queue_set()) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

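    // Fast path: a region that does not start a humongous object only needs
    // its remembered set tracking state recorded in the region attribute table.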
    if (!r->is_starts_humongous()) {
      g1h->register_region_with_region_attr(r);
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    bool is_candidate = humongous_region_is_candidate(g1h, r);
    uint rindex = r->hrm_index();
    g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
    if (is_candidate) {
      _candidate_humongous++;
      g1h->register_humongous_region_with_cset(rindex);
      g1h->register_humongous_region_with_region_attr(rindex);
      // Is_candidate already filters out humongous objects with large remembered sets.
      // If we have a humongous object with a few remembered sets, we simply flush these
      // remembered set entries into the DCQS. That will result in automatic
@ -2743,8 +2745,14 @@ class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
      // collecting remembered set entries for humongous regions that were not
      // reclaimed.
      r->rem_set()->set_state_complete();
#ifdef ASSERT
      G1HeapRegionAttr region_attr = g1h->region_attr(oop(r->bottom()));
      assert(region_attr.needs_remset_update(), "must be");
#endif
    }
    assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
    } else {
      g1h->register_region_with_region_attr(r);
    }
    _total_humongous++;

@ -2757,21 +2765,15 @@ class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
  void flush_rem_set_entries() { _dcq.flush(); }
};

void G1CollectedHeap::register_humongous_regions_with_cset() {
  if (!G1EagerReclaimHumongousObjects) {
    phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
    return;
  }
  double time = os::elapsed_counter();
void G1CollectedHeap::register_regions_with_region_attr() {
  Ticks start = Ticks::now();

  // Collect reclaim candidate information and register candidates with cset.
  RegisterHumongousWithInCSetFastTestClosure cl;
  RegisterRegionsWithRegionAttrTableClosure cl;
  heap_region_iterate(&cl);

  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
  phase_times()->record_fast_reclaim_humongous_stats(time,
                                                     cl.total_humongous(),
                                                     cl.candidate_humongous());
  phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0,
                                         cl.total_humongous(),
                                         cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.

@ -2861,7 +2863,7 @@ void G1CollectedHeap::start_new_collection_set() {

  collection_set()->start_incremental_building();

  clear_cset_fast_test();
  clear_region_attr();

  guarantee(_eden.length() == 0, "eden should have been cleared");
  policy()->transfer_survivors_to_cset(survivor());
@ -3302,17 +3304,17 @@ public:
    oop obj = *p;
    assert(obj != NULL, "the caller should have filtered out NULL values");

    const InCSetState cset_state =_g1h->in_cset_state(obj);
    if (!cset_state.is_in_cset_or_humongous()) {
    const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
    if (!region_attr.is_in_cset_or_humongous()) {
      return;
    }
    if (cset_state.is_in_cset()) {
    if (region_attr.is_in_cset()) {
      assert( obj->is_forwarded(), "invariant" );
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant" );
      assert(cset_state.is_humongous(),
             "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
      assert(region_attr.is_humongous(),
             "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
      _g1h->set_humongous_is_live(obj);
    }
  }

@ -3572,10 +3574,10 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
  // Initialize the GC alloc regions.
  _allocator->init_gc_alloc_regions(evacuation_info);

  register_humongous_regions_with_cset();
  register_regions_with_region_attr();
  assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");

  rem_set()->prepare_for_oops_into_collection_set_do();
  rem_set()->prepare_for_scan_rem_set();
  _preserved_marks_set.assert_empty();

#if COMPILER2_OR_JVMCI
@ -3788,7 +3790,7 @@ void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet*
void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
  // Also cleans the card table from temporary duplicate detection information used
  // during UpdateRS/ScanRS.
  rem_set()->cleanup_after_oops_into_collection_set_do();
  rem_set()->cleanup_after_scan_rem_set();

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
@ -3970,7 +3972,7 @@ private:
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
    g1h->clear_in_cset(r);
    g1h->clear_region_attr(r);

    if (r->is_young()) {
      assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
@ -4031,7 +4033,7 @@ private:
    G1Policy* policy = g1h->policy();
    policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);

    g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
    g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
  }
};

@ -4365,7 +4367,7 @@ class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
    G1CollectedHeap::heap()->clear_in_cset(r);
    G1CollectedHeap::heap()->clear_region_attr(r);
    r->set_young_index_in_cset(-1);
    return false;
  }
@ -4582,7 +4584,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,

// Methods for the GC alloc regions

bool G1CollectedHeap::has_more_regions(InCSetState dest) {
bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
  if (dest.is_old()) {
    return true;
  } else {
@ -4590,7 +4592,7 @@ bool G1CollectedHeap::has_more_regions(InCSetState dest) {
  }
}

HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
  assert(FreeList_lock->owned_by_self(), "pre-condition");

  if (!has_more_regions(dest)) {
@ -4618,6 +4620,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
    _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
  }
  _policy->remset_tracker()->update_at_allocate(new_alloc_region);
  register_region_with_region_attr(new_alloc_region);
  _hr_printer.alloc(new_alloc_region);
  return new_alloc_region;
}
@ -4626,12 +4629,12 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d

void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             InCSetState dest) {
                                             G1HeapRegionAttr dest) {
  policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_old()) {
    old_set_add(alloc_region);
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young(%d)", dest.value());
    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
    _survivor.add_used_bytes(allocated_bytes);
  }

@ -40,7 +40,7 @@
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
@ -464,10 +464,10 @@ private:
                                size_t allocated_bytes);

  // For GC alloc regions.
  bool has_more_regions(InCSetState dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
  bool has_more_regions(G1HeapRegionAttr dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, InCSetState dest);
                              size_t allocated_bytes, G1HeapRegionAttr dest);

  // - if explicit_gc is true, the GC is for a System.gc() etc,
  // otherwise it's for a failed allocation.
@ -551,10 +551,10 @@ public:
  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);

  // Returns the PLAB statistics for a given destination.
  inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(InCSetState dest);
  inline size_t desired_plab_sz(G1HeapRegionAttr dest);

  // Do anything common to GC's.
  void gc_prologue(bool full);
@ -573,27 +573,24 @@ public:
  inline void set_humongous_is_live(oop obj);

  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_cset(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_cset();
  inline void register_humongous_region_with_region_attr(uint index);
  // Update region attributes table with information about all regions.
  void register_regions_with_region_attr();
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_young_region_with_cset(HeapRegion* r) {
    _in_cset_fast_test.set_in_young(r->hrm_index());
  void register_young_region_with_region_attr(HeapRegion* r) {
    _region_attr.set_in_young(r->hrm_index());
  }
  void register_old_region_with_cset(HeapRegion* r) {
    _in_cset_fast_test.set_in_old(r->hrm_index());
  }
  void register_optional_region_with_cset(HeapRegion* r) {
    _in_cset_fast_test.set_optional(r->hrm_index());
  }
  void clear_in_cset(const HeapRegion* hr) {
    _in_cset_fast_test.clear(hr);
  inline void register_region_with_region_attr(HeapRegion* r);
  inline void register_old_region_with_region_attr(HeapRegion* r);
  inline void register_optional_region_with_region_attr(HeapRegion* r);

  void clear_region_attr(const HeapRegion* hr) {
    _region_attr.clear(hr);
  }

  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  void clear_region_attr() {
    _region_attr.clear();
  }

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
@ -1110,11 +1107,11 @@ public:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
  G1HeapRegionAttrBiasedMappedArray _region_attr;

public:

  inline InCSetState in_cset_state(const oop obj);
  inline G1HeapRegionAttr region_attr(const oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.

@ -30,6 +30,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"
@ -38,11 +39,11 @@ G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case InCSetState::Old:
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
@ -50,7 +51,7 @@ G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar paths as oops, but should
@ -150,23 +151,35 @@ inline bool G1CollectedHeap::is_in_cset(oop obj) {
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _in_cset_fast_test.is_in_cset(addr);
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
G1HeapRegionAttr G1CollectedHeap::region_attr(const oop obj) {
  return _region_attr.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

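// For regions with no special collection set role, only the remembered set
// tracking state needs to be reflected in the attribute table.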
void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
@ -294,7 +307,7 @@ inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
    _region_attr.clear_humongous(region);
  }
}

@ -121,7 +121,7 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_cset(hr);
  _g1h->register_old_region_with_region_attr(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
@ -137,7 +137,7 @@ void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_cset(hr);
  _g1h->register_optional_region_with_region_attr(hr);

  hr->set_index_in_opt_cset(_num_optional_regions++);
}
@ -316,7 +316,7 @@ void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  }

  assert(!hr->in_collection_set(), "invariant");
  _g1h->register_young_region_with_cset(hr);
  _g1h->register_young_region_with_region_attr(hr);
}

void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
@ -492,7 +492,7 @@ void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_r
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear cset marker.
    _g1h->clear_in_cset(r);
    _g1h->clear_region_attr(r);
    add_old_region(r);
  }
  candidates()->remove(num_old_candidate_regions);
@ -526,7 +526,7 @@ void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* p
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    _g1h->clear_in_cset(r);
    _g1h->clear_region_attr(r);
    r->clear_index_in_opt_cset();
  }
  free_optional_regions();

@ -170,7 +170,7 @@ void G1GCPhaseTimes::reset() {
  _recorded_total_free_cset_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _cur_fast_reclaim_humongous_time_ms = 0.0;
  _cur_fast_reclaim_humongous_register_time_ms = 0.0;
  _cur_region_register_time = 0.0;
  _cur_fast_reclaim_humongous_total = 0;
  _cur_fast_reclaim_humongous_candidates = 0;
  _cur_fast_reclaim_humongous_reclaimed = 0;
@ -364,7 +364,7 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double sum_ms = _root_region_scan_wait_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_fast_reclaim_humongous_register_time_ms +
                        _cur_region_register_time +
                        _recorded_clear_claimed_marks_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);
@ -374,8 +374,8 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  }
  debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  debug_time("Region Register", _cur_region_register_time);
  if (G1EagerReclaimHumongousObjects) {
    debug_time("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
  }

@ -176,8 +176,9 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {

  double _recorded_serial_free_cset_time_ms;

  double _cur_region_register_time;

  double _cur_fast_reclaim_humongous_time_ms;
  double _cur_fast_reclaim_humongous_register_time_ms;
  size_t _cur_fast_reclaim_humongous_total;
  size_t _cur_fast_reclaim_humongous_candidates;
  size_t _cur_fast_reclaim_humongous_reclaimed;
@ -305,8 +306,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    _recorded_serial_free_cset_time_ms = time_ms;
  }

  void record_fast_reclaim_humongous_stats(double time_ms, size_t total, size_t candidates) {
    _cur_fast_reclaim_humongous_register_time_ms = time_ms;
  void record_register_regions(double time_ms, size_t total, size_t candidates) {
    _cur_region_register_time = time_ms;
    _cur_fast_reclaim_humongous_total = total;
    _cur_fast_reclaim_humongous_candidates = candidates;
  }

src/hotspot/share/gc/g1/g1HeapRegionAttr.hpp (new file, 164 lines)
@ -0,0 +1,164 @@
/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1HEAPREGIONATTR_HPP
#define SHARE_GC_G1_G1HEAPREGIONATTR_HPP

#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/heapRegion.hpp"

// Per-region attributes often used during garbage collection to avoid costly
// lookups for that information all over the place.
struct G1HeapRegionAttr {
public:
  // We use different types to represent the state value depending on platform as
  // some have issues loading parts of words.
#ifdef SPARC
  typedef int32_t region_type_t;
  typedef uint32_t needs_remset_update_t;
#else
  typedef int8_t region_type_t;
  typedef uint8_t needs_remset_update_t;
#endif

private:
  needs_remset_update_t _needs_remset_update;
  region_type_t _type;

public:
  // Selection of the values for the _type field was driven to micro-optimize the
  // encoding and frequency of the checks.
  // The most common check for a given reference is whether the region is in the
  // collection set or not, and which generation this region is in.
  // The selected encoding allows us to use a single check (> NotInCSet) for the
  // former.
  //
  // The other values are used for objects requiring various special cases,
  // for example eager reclamation of humongous objects or optional regions.
  static const region_type_t Optional = -2;    // The region is optional and NOT in the current collection set.
  static const region_type_t Humongous = -1;   // The region is a humongous candidate not in the current collection set.
  static const region_type_t NotInCSet = 0;    // The region is not in the collection set.
  static const region_type_t Young = 1;        // The region is in the collection set and a young region.
  static const region_type_t Old = 2;          // The region is in the collection set and an old region.
  static const region_type_t Num = 3;

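  // For example, is_in_cset() below reduces to a single signed comparison,
  // type() > NotInCSet, which exactly the Young and Old values satisfy.
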
  G1HeapRegionAttr(region_type_t type = NotInCSet, bool needs_remset_update = false) :
    _needs_remset_update(needs_remset_update), _type(type) {

    assert(is_valid(), "Invalid type %d", _type);
  }

  region_type_t type() const { return _type; }

  const char* get_type_str() const {
    switch (type()) {
      case Optional: return "Optional";
      case Humongous: return "Humongous";
      case NotInCSet: return "NotInCSet";
      case Young: return "Young";
      case Old: return "Old";
      default: ShouldNotReachHere(); return "";
    }
  }

  bool needs_remset_update() const { return _needs_remset_update != 0; }

  void set_old() { _type = Old; }
  void clear_humongous() {
    assert(is_humongous() || !is_in_cset(), "must be");
    _type = NotInCSet;
  }
  void set_has_remset(bool value) { _needs_remset_update = value ? 1 : 0; }

  bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
  bool is_in_cset() const { return type() > NotInCSet; }

  bool is_humongous() const { return type() == Humongous; }
  bool is_young() const { return type() == Young; }
  bool is_old() const { return type() == Old; }
  bool is_optional() const { return type() == Optional; }

#ifdef ASSERT
  bool is_default() const { return type() == NotInCSet; }
  bool is_valid() const { return (type() >= Optional && type() < Num); }
  bool is_valid_gen() const { return (type() >= Young && type() <= Old); }
#endif
};

// Table for all regions in the heap for above.
//
// We use this to speed up reference processing during young collection and
// quickly reclaim humongous objects. For the latter, at the start of GC, by adding
// it as a humongous region we enable special handling for that region. During the
// reference iteration closures, when we see a humongous region, we then simply mark
// it as referenced, i.e. live, and remove it from this table to prevent further
// processing on it.
//
// This means that this does NOT completely correspond to the information stored
// in a HeapRegion, but only to what is interesting for the current young collection.
class G1HeapRegionAttrBiasedMappedArray : public G1BiasedMappedArray<G1HeapRegionAttr> {
protected:
  G1HeapRegionAttr default_value() const { return G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet); }
public:
  void set_optional(uintptr_t index, bool needs_remset_update) {
    assert(get_by_index(index).is_default(),
           "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
    set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Optional, needs_remset_update));
  }

  void set_humongous(uintptr_t index, bool needs_remset_update) {
    assert(get_by_index(index).is_default(),
           "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
    set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Humongous, needs_remset_update));
  }

  void clear_humongous(uintptr_t index) {
    get_ref_by_index(index)->clear_humongous();
  }

  void set_has_remset(uintptr_t index, bool needs_remset_update) {
    get_ref_by_index(index)->set_has_remset(needs_remset_update);
  }

  void set_in_young(uintptr_t index) {
    assert(get_by_index(index).is_default(),
           "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
    set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Young, true));
  }

  void set_in_old(uintptr_t index, bool needs_remset_update) {
    assert(get_by_index(index).is_default(),
           "Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
    set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Old, needs_remset_update));
  }

  bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
  bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
  bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
  G1HeapRegionAttr at(HeapWord* addr) const { return get_by_address(addr); }
  void clear() { G1BiasedMappedArray<G1HeapRegionAttr>::clear(); }
  void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet)); }
};

#endif // SHARE_GC_G1_G1HEAPREGIONATTR_HPP

@ -790,50 +790,50 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {

  virtual bool do_heap_region(HeapRegion* hr) {
    uint i = hr->hrm_index();
    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
    G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i);
    if (hr->is_humongous()) {
      if (hr->in_collection_set()) {
        log_error(gc, verify)("## humongous region %u in CSet", i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
      if (region_attr.is_in_cset()) {
        log_error(gc, verify)("## inconsistent region attr type %s for humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
      if (hr->is_continues_humongous() && region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for continues humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
    } else {
      if (cset_state.is_humongous()) {
        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
      if (region_attr.is_humongous()) {
        log_error(gc, verify)("## inconsistent region attr type %s for non-humongous region %u", region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (hr->in_collection_set() != cset_state.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                              hr->in_collection_set(), cset_state.value(), i);
      if (hr->in_collection_set() != region_attr.is_in_cset()) {
        log_error(gc, verify)("## in CSet %d / region attr type %s inconsistency for region %u",
                              hr->in_collection_set(), region_attr.get_type_str(), i);
        _failures = true;
        return true;
      }
      if (cset_state.is_in_cset()) {
      if (region_attr.is_in_cset()) {
        if (hr->is_archive()) {
          log_error(gc, verify)("## is_archive in collection set for region %u", i);
          _failures = true;
          return true;
        }
        if (hr->is_young() != (cset_state.is_young())) {
          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->is_young(), cset_state.value(), i);
        if (hr->is_young() != (region_attr.is_young())) {
          log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u",
                                hr->is_young(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->is_old(), cset_state.value(), i);
        if (hr->is_old() != (region_attr.is_old())) {
          log_error(gc, verify)("## is_old %d / region attr type %s inconsistency for region %u",
                                hr->is_old(), region_attr.get_type_str(), i);
          _failures = true;
          return true;
        }
@ -1,142 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1INCSETSTATE_HPP
#define SHARE_GC_G1_G1INCSETSTATE_HPP

#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/heapRegion.hpp"

// Per-region state during garbage collection.
struct InCSetState {
public:
  // We use different types to represent the state value. In particular, SPARC puts
  // values in structs from "left to right", i.e. MSB to LSB. This results in many
  // unnecessary shift operations when loading and storing values of this type.
  // This degrades performance significantly (>10%) on that platform.
  // Other tested ABIs do not seem to have this problem, and actually tend to
  // favor smaller types, so we use the smallest usable type there.
#ifdef SPARC
  #define CSETSTATE_FORMAT INTPTR_FORMAT
  typedef intptr_t in_cset_state_t;
#else
  #define CSETSTATE_FORMAT "%d"
  typedef int8_t in_cset_state_t;
#endif
private:
  in_cset_state_t _value;
public:
  enum {
    // Selection of the values was driven by the need to micro-optimize the encoding and
    // frequency of the checks.
    // The most common check is whether the region is in the collection set or not;
    // this encoding allows us to use a > 0 check.
    // The positive values are encoded in increasing generation order, which
    // makes getting the next generation fast by a simple increment. They are also
    // used to index into arrays.
    // The negative values are used for objects requiring various special cases,
    // for example eager reclamation of humongous objects or optional regions.
    Optional  = -2,    // The region is optional
    Humongous = -1,    // The region is humongous
    NotInCSet =  0,    // The region is not in the collection set.
    Young     =  1,    // The region is in the collection set and a young region.
    Old       =  2,    // The region is in the collection set and an old region.
    Num
  };

  InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
    assert(is_valid(), "Invalid state %d", _value);
  }

  in_cset_state_t value() const        { return _value; }

  void set_old()                       { _value = Old; }

  bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
  bool is_in_cset() const              { return _value > NotInCSet; }

  bool is_humongous() const            { return _value == Humongous; }
  bool is_young() const                { return _value == Young; }
  bool is_old() const                  { return _value == Old; }
  bool is_optional() const             { return _value == Optional; }

#ifdef ASSERT
  bool is_default() const              { return _value == NotInCSet; }
  bool is_valid() const                { return (_value >= Optional) && (_value < Num); }
  bool is_valid_gen() const            { return (_value >= Young && _value <= Old); }
#endif
};
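
To make the encoding concrete, here is a minimal sketch of the checks this layout enables (hypothetical values; the names mirror the enum above):

  InCSetState s(InCSetState::Young);
  assert(s.is_in_cset(), "Young (1) passes the single signed > 0 compare");
  InCSetState next(s.value() + 1);   // a plain increment advances Young -> Old
  assert(next.is_old(), "generation order is encoded in adjacent positive values");
  InCSetState h(InCSetState::Humongous);
  assert(!h.is_in_cset() && h.is_humongous(), "negative values take the special-case path");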

// Instances of this class are used for quick tests on whether a reference points
// into the collection set (and, if so, into which generation) or into a humongous object.
//
// Each of the array's elements indicates whether the corresponding region is in
// the collection set and, if so, in which generation, or a humongous region.
//
// We use this to speed up reference processing during young collection and to
// quickly reclaim humongous objects. For the latter, by making a humongous region
// succeed this test, we sort-of add it to the collection set. During the reference
// iteration closures, when we see a humongous region, we then simply mark it as
// referenced, i.e. live.
class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
protected:
  InCSetState default_value() const { return InCSetState::NotInCSet; }
public:
  void set_optional(uintptr_t index) {
    assert(get_by_index(index).is_default(),
           "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
    set_by_index(index, InCSetState::Optional);
  }

  void set_humongous(uintptr_t index) {
    assert(get_by_index(index).is_default(),
           "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
    set_by_index(index, InCSetState::Humongous);
  }

  void clear_humongous(uintptr_t index) {
    set_by_index(index, InCSetState::NotInCSet);
  }

  void set_in_young(uintptr_t index) {
    assert(get_by_index(index).is_default(),
           "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
    set_by_index(index, InCSetState::Young);
  }

  void set_in_old(uintptr_t index) {
    assert(get_by_index(index).is_default(),
           "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
    set_by_index(index, InCSetState::Old);
  }

  bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
  bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
  bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
  InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
  void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
  void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), InCSetState::NotInCSet); }
};

#endif // SHARE_GC_G1_G1INCSETSTATE_HPP
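
A minimal usage sketch of the fast-test array above, following the evacuation closures later in this diff (pre-rename names; `_g1h`, `p`, and `obj` as they appear there):

  // One biased-array lookup classifies the region the reference points into:
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);          // needs copying: the common fast path
  } else if (state.is_humongous()) {
    _g1h->set_humongous_is_live(obj);   // humongous region: just mark it live
  }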

@ -25,7 +25,7 @@
#ifndef SHARE_GC_G1_G1OOPCLOSURES_HPP
#define SHARE_GC_G1_G1OOPCLOSURES_HPP

#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "memory/iterator.hpp"
#include "oops/markOop.hpp"

@ -52,18 +52,18 @@ protected:
  inline void prefetch_and_push(T* p, oop const obj);

  template <class T>
  inline void handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj);
  inline void handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj);
public:
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }

  inline void trim_queue_partially();
};

// Used during the Update RS phase to refine remaining cards in the DCQ during garbage collection.
class G1ScanObjsDuringUpdateRSClosure : public G1ScanClosureBase {
// Used to scan cards from the DCQS or the remembered sets during garbage collection.
class G1ScanCardClosure : public G1ScanClosureBase {
public:
  G1ScanObjsDuringUpdateRSClosure(G1CollectedHeap* g1h,
                                  G1ParScanThreadState* pss) :
  G1ScanCardClosure(G1CollectedHeap* g1h,
                    G1ParScanThreadState* pss) :
    G1ScanClosureBase(g1h, pss) { }

  template <class T> void do_oop_work(T* p);
@ -71,23 +71,11 @@ public:
  virtual void do_oop(oop* p) { do_oop_work(p); }
};

// Used during the Scan RS phase to scan cards from the remembered set during garbage collection.
class G1ScanObjsDuringScanRSClosure : public G1ScanClosureBase {
public:
  G1ScanObjsDuringScanRSClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state):
    G1ScanClosureBase(g1h, par_scan_state) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p) { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Used during Optional RS scanning to make sure we trim the queues in a timely manner.
class G1ScanRSForOptionalClosure : public OopClosure {
  G1ScanObjsDuringScanRSClosure* _scan_cl;
  G1ScanCardClosure* _scan_cl;
public:
  G1ScanRSForOptionalClosure(G1ScanObjsDuringScanRSClosure* cl) : _scan_cl(cl) { }
  G1ScanRSForOptionalClosure(G1ScanCardClosure* cl) : _scan_cl(cl) { }

  template <class T> void do_oop_work(T* p);
  virtual void do_oop(oop* p) { do_oop_work(p); }

@ -61,10 +61,10 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
}

template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  if (state.is_humongous()) {
inline void G1ScanClosureBase::handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj) {
  if (region_attr.is_humongous()) {
    _g1h->set_humongous_is_live(obj);
  } else if (state.is_optional()) {
  } else if (region_attr.is_optional()) {
    _par_scan_state->remember_reference_into_optional_region(p);
  }
}
@ -81,16 +81,16 @@ inline void G1ScanEvacuatedObjClosure::do_oop_work(T* p) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  if (region_attr.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else if (!HeapRegion::is_in_same_region(p, obj)) {
    handle_non_cset_obj_common(state, p, obj);
    handle_non_cset_obj_common(region_attr, p, obj);
    assert(_scanning_in_young != Uninitialized, "Scan location has not been initialized.");
    if (_scanning_in_young == True) {
      return;
    }
    _par_scan_state->enqueue_card_if_tracked(p, obj);
    _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
  }
}

@ -160,7 +160,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
}

template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_work(T* p) {
inline void G1ScanCardClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
@ -169,31 +169,15 @@ inline void G1ScanObjsDuringUpdateRSClosure::do_oop_work(T* p) {

  check_obj_during_refinement(p, obj);

  assert(!_g1h->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.", p2i(p), _g1h->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
  // Since the source is always from outside the collection set, here we implicitly know
  // that this is a cross-region reference too.
  // We can not check for references from the collection set: the remembered sets
  // may contain such entries and we do not filter them before.

  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  if (region_attr.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else if (!HeapRegion::is_in_same_region(p, obj)) {
    handle_non_cset_obj_common(state, p, obj);
    _par_scan_state->enqueue_card_if_tracked(p, obj);
  }
}

template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);

  const InCSetState state = _g1h->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else if (!HeapRegion::is_in_same_region(p, obj)) {
    handle_non_cset_obj_common(state, p, obj);
    handle_non_cset_obj_common(region_attr, p, obj);
    _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
  }
}

@ -233,7 +217,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1h->in_cset_state(obj);
  const G1HeapRegionAttr state = _g1h->region_attr(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark_raw();

@ -75,11 +75,11 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
  _dest[G1HeapRegionAttr::NotInCSet] = G1HeapRegionAttr::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[InCSetState::Young] = InCSetState::Old;
  _dest[InCSetState::Old] = InCSetState::Old;
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old] = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

@ -157,18 +157,18 @@ void G1ParScanThreadState::trim_queue() {
  } while (!_refs->is_empty());
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
                                                      G1HeapRegionAttr* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
  assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
    HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
@ -190,38 +190,38 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
    // no other space to try.
    return NULL;
  }
}

InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
  if (region_attr.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
      return region_attr;
    }
  }
  return dest(state);
  return dest(region_attr);
}

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             dest_attr.type() == G1HeapRegionAttr::Old,
                                                             alloc_buf->word_sz() * HeapWordSize);
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_state.value() == InCSetState::Old);
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
@ -232,21 +232,21 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
         (!from_region->is_young() && young_index == 0), "invariant" );

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
  if (_old_gen_is_full && dest_attr.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
      obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
@ -255,7 +255,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
    }
  }

@ -267,7 +267,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT
@ -280,7 +280,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
    if (dest_attr.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
@ -300,8 +300,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      const bool is_from_young = region_attr.is_young();
      const bool is_to_young = dest_attr.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
@ -322,12 +322,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
      oop* old_p = set_partial_array_mask(old);
      do_oop_partial_array(old_p);
    } else {
      G1ScanInYoungSetter x(&_scanner, dest_state.is_young());
      G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
    return forward_ptr;
  }
}

@ -45,17 +45,17 @@ class outputStream;

class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;
  RefToScanQueue* _refs;
  RefToScanQueue* _refs;
  G1DirtyCardQueue _dcq;
  G1CardTable* _ct;
  G1CardTable* _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator* _plab_allocator;
  G1PLABAllocator* _plab_allocator;

  AgeTable _age_table;
  InCSetState _dest[InCSetState::Num];
  AgeTable _age_table;
  G1HeapRegionAttr _dest[G1HeapRegionAttr::Num];
  // Local tenuring threshold.
  uint _tenuring_threshold;
  uint _tenuring_threshold;
  G1ScanEvacuatedObjClosure _scanner;

  uint _worker_id;
@ -80,12 +80,12 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1CardTable* ct() { return _ct; }

  InCSetState dest(InCSetState original) const {
  G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
    assert(original.is_valid(),
           "Original state invalid: " CSETSTATE_FORMAT, original.value());
    assert(_dest[original.value()].is_valid_gen(),
           "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
    return _dest[original.value()];
           "Original region attr invalid: %s", original.get_type_str());
    assert(_dest[original.type()].is_valid_gen(),
           "Dest region attr is invalid: %s", _dest[original.type()].get_type_str());
    return _dest[original.type()];
  }

  size_t _num_optional_regions;
@ -111,10 +111,19 @@ public:
  template <class T> void do_oop_ext(T* ref);
  template <class T> void push_on_queue(T* ref);

  template <class T> void enqueue_card_if_tracked(T* p, oop o) {
  template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
    assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
    assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
    if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {

#ifdef ASSERT
    HeapRegion* const hr_obj = _g1h->heap_region_containing((HeapWord*)o);
    assert(region_attr.needs_remset_update() == hr_obj->rem_set()->is_tracked(),
           "State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
           BOOL_TO_STR(region_attr.needs_remset_update()),
           BOOL_TO_STR(hr_obj->rem_set()->is_tracked()),
           hr_obj->hrm_index());
#endif
    if (!region_attr.needs_remset_update()) {
      return;
    }
    size_t card_index = ct()->index_for(p);
@ -184,14 +193,14 @@ private:
  // Returns a non-NULL pointer if successful, and updates dest if required.
  // Also determines whether we should continue to try to allocate into the various
  // generations or just end trying to allocate.
  HeapWord* allocate_in_next_plab(InCSetState const state,
                                  InCSetState* dest,
  HeapWord* allocate_in_next_plab(G1HeapRegionAttr const region_attr,
                                  G1HeapRegionAttr* dest,
                                  size_t word_sz,
                                  bool previous_plab_refill_failed);

  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
  inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);

  void report_promotion_event(InCSetState const dest_state,
  void report_promotion_event(G1HeapRegionAttr const dest_attr,
                              oop const old, size_t word_sz, uint age,
                              HeapWord * const obj_ptr) const;

@ -200,7 +209,7 @@ private:

  inline void trim_queue_to_threshold(uint threshold);
public:
  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
  oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);

  void trim_queue();
  void trim_queue_partially();

@ -41,14 +41,14 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // than one thread might claim the same card. So the same card may be
  // processed multiple times, and so we might get references into old gen here.
  // So we need to redo this check.
  const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
  // References pushed onto the work stack should never point to a humongous region
  // as they are not added to the collection set due to above precondition.
  assert(!in_cset_state.is_humongous(),
  assert(!region_attr.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));

  if (!in_cset_state.is_in_cset()) {
  if (!region_attr.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }
@ -57,7 +57,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  if (m->is_marked()) {
    obj = (oop) m->decode_pointer();
  } else {
    obj = copy_to_survivor_space(in_cset_state, obj, m);
    obj = copy_to_survivor_space(region_attr, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

@ -67,7 +67,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  }
  HeapRegion* from = _g1h->heap_region_containing(p);
  if (!from->is_young()) {
    enqueue_card_if_tracked(p, obj);
    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
  }
}

@ -27,7 +27,7 @@

#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"

@ -305,7 +305,7 @@ void G1RemSet::initialize(size_t capacity, uint max_regions) {
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ScanCardClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   G1GCPhaseTimes::GCParPhases phase,
                                                   uint worker_i) :
@ -345,7 +345,7 @@ void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r) {

  G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);

  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss);
  G1ScanCardClosure scan_cl(_g1h, _pss);
  G1ScanRSForOptionalClosure cl(&scan_cl);
  _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
  _opt_refs_memory_used += opt_rem_set_list->used_memory();
@ -464,7 +464,7 @@ void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
                            G1GCPhaseTimes::GCParPhases coderoots_phase) {
  assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");

  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanCardClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
  _g1h->collection_set_iterate_increment_from(&cl, worker_i);

@ -489,12 +489,12 @@ void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public G1CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;
  G1ScanCardClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanCardClosure* update_rs_cl) :
    _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

@ -527,7 +527,7 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  if (G1HotCardCache::default_use_cache()) {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);

    G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss);
    G1ScanCardClosure scan_hcc_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
    _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
  }
@ -536,7 +536,7 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  {
    G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);

    G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss);
    G1ScanCardClosure update_rs_cl(_g1h, pss);
    G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
    _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);

@ -545,12 +545,12 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  }
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
void G1RemSet::prepare_for_scan_rem_set() {
  G1BarrierSet::dirty_card_queue_set().concatenate_logs();
  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
void G1RemSet::cleanup_after_scan_rem_set() {
  G1GCPhaseTimes* phase_times = _g1h->phase_times();

  // Set all cards back to clean.
@ -712,7 +712,7 @@ void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
}

bool G1RemSet::refine_card_during_gc(CardValue* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
                                     G1ScanCardClosure* update_rs_cl) {
  assert(_g1h->is_gc_active(), "Only call during GC");

  // Construct the region representing the card.

@ -47,8 +47,7 @@ class G1HotCardCache;
class G1RemSetScanState;
class G1ParScanThreadState;
class G1Policy;
class G1ScanObjsDuringScanRSClosure;
class G1ScanObjsDuringUpdateRSClosure;
class G1ScanCardClosure;
class HeapRegionClaimer;

// A G1RemSet in which each heap region has a rem set that records the
@ -99,11 +98,10 @@ public:
  // into the collection set or update the remembered set.
  void update_rem_set(G1ParScanThreadState* pss, uint worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
  // code) any thread calls oops_into_collection_set_do.
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  // Prepare for and cleanup after scanning the remembered sets. Must be called
  // once before and after in sequential code.
  void prepare_for_scan_rem_set();
  void cleanup_after_scan_rem_set();

  G1RemSetScanState* scan_state() const { return _scan_state; }

@ -115,7 +113,7 @@ public:
  // Refine the card corresponding to "card_ptr", applying the given closure to
  // all references found. Must only be called during gc.
  // Returns whether the card has been scanned.
  bool refine_card_during_gc(CardValue* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl);
  bool refine_card_during_gc(CardValue* card_ptr, G1ScanCardClosure* update_rs_cl);

  // Print accumulated summary info from the start of the VM.
  void print_summary_info();
@ -135,7 +133,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
  G1CardTable *_ct;

  G1ParScanThreadState* _pss;
  G1ScanObjsDuringScanRSClosure* _scan_objs_on_card_cl;
  G1ScanCardClosure* _scan_objs_on_card_cl;

  G1RemSetScanState* _scan_state;

@ -164,7 +162,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
  void scan_strong_code_roots(HeapRegion* r);
public:
  G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                           G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                           G1ScanCardClosure* scan_obj_on_card,
                           G1ParScanThreadState* pss,
                           G1GCPhaseTimes::GCParPhases phase,
                           uint worker_i);

@ -43,7 +43,7 @@ private:
    ShenandoahStore,
    ShenandoahValue,
    ShenandoahOopStore,
    ShenandoahNone,
    ShenandoahNone
  };

  static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
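
The trailing-comma removals in this hunk recur across the Shenandoah enums below; the likely motivation is portability, since a comma after the last enumerator only became standard in C++11 and strict C++98/03 builds reject it:

  enum GCMode { concurrent_normal, stw_full };   // accepted everywhere
  enum GCMode { concurrent_normal, stw_full, };  // ill-formed before C++11; -Wpedantic flags it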

@ -73,9 +73,9 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
  // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit.

  size_t capacity = ShenandoahHeap::heap()->max_capacity();
  size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
  size_t free_target = capacity / 100 * ShenandoahMinFreeThreshold;
  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
  size_t max_cset = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
  size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);

  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
                     SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
@ -128,7 +128,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {

  // Check if we are falling below the worst limit, time to trigger the GC, regardless of
  // anything else.
  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
  size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
  if (available < min_threshold) {
    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
                 available / M, min_threshold / M);
@ -138,7 +138,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
  // Check if we need to learn a bit about the application
  const size_t max_learn = ShenandoahLearningSteps;
  if (_gc_times_learned < max_learn) {
    size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
    size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
    if (available < init_threshold) {
      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
                   _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
@ -152,8 +152,8 @@ bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {

  size_t allocation_headroom = available;

  size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
  size_t penalties = _gc_time_penalties * capacity / 100;
  size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
  size_t penalties = capacity / 100 * _gc_time_penalties;

  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
  allocation_headroom -= MIN2(allocation_headroom, penalties);
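
The reordering to `capacity / 100 * Threshold`, repeated throughout these heuristics, reads as overflow hardening: on platforms where size_t is 32 bits, `threshold * capacity` can wrap before the division. A self-contained sketch of the failure mode (hypothetical values, with uint32_t standing in for a 32-bit size_t):

  #include <cstdint>
  #include <iostream>

  int main() {
    const uint32_t capacity = 3u * 1024 * 1024 * 1024;  // 3 GiB heap
    const uint32_t pct      = 10;                       // 10% threshold
    const uint32_t wrapped  = pct * capacity / 100;     // product wraps past 2^32 first
    const uint32_t safe     = capacity / 100 * pct;     // dividing first stays in range
    std::cout << wrapped << " vs " << safe << "\n";     // ~20 MiB vs ~307 MiB
  }

Dividing first costs at most 99 bytes of truncation, which is negligible at these scales.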

@ -52,9 +52,11 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristic
bool ShenandoahCompactHeuristics::should_start_normal_gc() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  size_t capacity = heap->max_capacity();
  size_t available = heap->free_set()->available();
  size_t threshold_bytes_allocated = heap->max_capacity() * ShenandoahAllocationThreshold / 100;
  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;

  size_t threshold_bytes_allocated = capacity / 100 * ShenandoahAllocationThreshold;
  size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;

  if (available < min_threshold) {
    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",

@ -82,7 +82,7 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
  // Do not select too large a CSet that would overflow the available free space.
  // Take at least the entire evacuation reserve, and be free to overflow to free space.
  size_t capacity = ShenandoahHeap::heap()->max_capacity();
  size_t available = MAX2(ShenandoahEvacReserve * capacity / 100, actual_free);
  size_t available = MAX2(capacity / 100 * ShenandoahEvacReserve, actual_free);
  size_t max_cset = (size_t)(available / ShenandoahEvacWaste);

  log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",

@ -54,7 +54,7 @@ bool ShenandoahStaticHeuristics::should_start_normal_gc() const {

  size_t capacity = heap->max_capacity();
  size_t available = heap->free_set()->available();
  size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
  size_t threshold_available = capacity / 100 * ShenandoahFreeThreshold;

  if (available < threshold_available) {
    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below free threshold (" SIZE_FORMAT "M)",

@ -119,9 +119,9 @@ void ShenandoahTraversalHeuristics::choose_collection_set(ShenandoahCollectionSe

  size_t capacity = heap->max_capacity();
  size_t actual_free = heap->free_set()->available();
  size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
  size_t free_target = capacity / 100 * ShenandoahMinFreeThreshold;
  size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
  size_t max_cset = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
  size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);

  log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
                     SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M, Min Garbage: " SIZE_FORMAT "M",
@ -211,7 +211,7 @@ bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {

  // Check if we are falling below the worst limit, time to trigger the GC, regardless of
  // anything else.
  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
  size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
  if (available < min_threshold) {
    log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
                 available / M, min_threshold / M);
@ -221,7 +221,7 @@ bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {
  // Check if we need to learn a bit about the application
  const size_t max_learn = ShenandoahLearningSteps;
  if (_gc_times_learned < max_learn) {
    size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
    size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
    if (available < init_threshold) {
      log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
                   _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
@ -235,8 +235,8 @@ bool ShenandoahTraversalHeuristics::should_start_traversal_gc() {

  size_t allocation_headroom = available;

  size_t spike_headroom = ShenandoahAllocSpikeFactor * capacity / 100;
  size_t penalties = _gc_time_penalties * capacity / 100;
  size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
  size_t penalties = capacity / 100 * _gc_time_penalties;

  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
  allocation_headroom -= MIN2(allocation_headroom, penalties);

@ -33,7 +33,7 @@ public:
    _alloc_shared_gc,      // Allocate common, outside of GCLAB
    _alloc_tlab,           // Allocate TLAB
    _alloc_gclab,          // Allocate GCLAB
    _ALLOC_LIMIT,
    _ALLOC_LIMIT
  };

  static const char* alloc_type_to_string(Type type) {

@ -204,7 +204,7 @@ size_t ShenandoahArguments::conservative_max_heap_alignment() {

void ShenandoahArguments::initialize_alignments() {
  // Need to setup sizes early to get correct alignments.
  ShenandoahHeapRegion::setup_sizes(InitialHeapSize, MaxHeapSize);
  ShenandoahHeapRegion::setup_sizes(MaxHeapSize);

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  size_t align = ShenandoahHeapRegion::region_size_bytes();

@ -35,7 +35,7 @@ public:
    _safe_unknown,
    _safe_oop,
    _safe_oop_fwd,
    _safe_all,
    _safe_all
  };

  static void print_obj(ShenandoahMessageBuffer &msg, oop obj);

@ -32,12 +32,13 @@
class ShenandoahBarrierSetAssembler;

class ShenandoahBarrierSet: public BarrierSet {
private:
public:
  enum ArrayCopyStoreValMode {
    NONE,
    READ_BARRIER,
    WRITE_BARRIER
  };
private:

  ShenandoahHeap* _heap;
  ShenandoahSATBMarkQueueSet _satb_mark_queue_set;

@ -59,7 +59,7 @@ private:
    concurrent_traversal,
    concurrent_normal,
    stw_degenerated,
    stw_full,
    stw_full
  } GCMode;

  // While we could have a single lock for these, it may risk unblocking

@ -430,7 +430,7 @@ void ShenandoahFreeSet::rebuild() {
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = ShenandoahEvacReserve * _heap->max_capacity() / 100;
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {

@ -244,7 +244,7 @@ public:
    UPDATEREFS_BITPOS = 3,

    // Heap is under traversal collection
    TRAVERSAL_BITPOS = 4,
    TRAVERSAL_BITPOS = 4
  };

  enum GCState {
@ -253,7 +253,7 @@ public:
    MARKING = 1 << MARKING_BITPOS,
    EVACUATION = 1 << EVACUATION_BITPOS,
    UPDATEREFS = 1 << UPDATEREFS_BITPOS,
    TRAVERSAL = 1 << TRAVERSAL_BITPOS,
    TRAVERSAL = 1 << TRAVERSAL_BITPOS
  };

private:
@ -303,7 +303,7 @@ public:
    _degenerated_mark,
    _degenerated_evac,
    _degenerated_updaterefs,
    _DEGENERATED_LIMIT,
    _DEGENERATED_LIMIT
  };

  static const char* degen_point_to_string(ShenandoahDegenPoint point) {

@ -516,7 +516,7 @@ HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  }
}

void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

@ -526,10 +526,10 @@ void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
                      max_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
@ -562,10 +562,10 @@ void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
    if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
                      max_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {

@ -114,7 +114,7 @@ private:
    _cset,         // region is in collection set
    _pinned,       // region is pinned
    _pinned_cset,  // region is pinned and in cset (evac failure path)
    _trash,        // region contains only trash
    _trash         // region contains only trash
  };

  const char* region_state_to_string(RegionState s) const {
@ -257,7 +257,7 @@ public:

  static const size_t MIN_NUM_REGIONS = 10;

  static void setup_sizes(size_t initial_heap_size, size_t max_heap_size);
  static void setup_sizes(size_t max_heap_size);

  double empty_time() {
    return _empty_time;

@ -39,7 +39,7 @@ private:
  enum PrivateConstants {
    ValBuckets = 512,
    MagBuckets = 24,
    MagMinimum = -12,
    MagMinimum = -12
  };
  int** _hdr;

@ -41,7 +41,7 @@ enum UpdateRefsMode {

enum StringDedupMode {
  NO_DEDUP,      // Do not do anything for String deduplication
  ENQUEUE_DEDUP, // Enqueue candidate Strings for deduplication
  ENQUEUE_DEDUP  // Enqueue candidate Strings for deduplication
};

class ShenandoahMarkRefsSuperClosure : public MetadataVisitingOopIterateClosure {

@ -153,7 +153,7 @@ void ShenandoahPacer::setup_for_traversal() {
void ShenandoahPacer::setup_for_idle() {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  size_t initial = _heap->max_capacity() * ShenandoahPacingIdleSlack / 100;
  size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack;
  double tax = 1;

  restart_with(initial, tax);

@ -35,7 +35,7 @@ STATIC_ASSERT(sizeof(ShenandoahSharedValue) == 1);
typedef struct ShenandoahSharedFlag {
  enum {
    UNSET = 0,
    SET = 1,
    SET = 1
  };

  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile ShenandoahSharedValue));
@ -62,8 +62,8 @@ typedef struct ShenandoahSharedFlag {
    return OrderAccess::load_acquire(&value) == UNSET;
  }

  void set_cond(bool value) {
    if (value) {
  void set_cond(bool val) {
    if (val) {
      set();
    } else {
      unset();
@ -167,8 +167,8 @@ typedef struct ShenandoahSharedBitmap {
    return (OrderAccess::load_acquire(&value)) == 0;
  }

  void set_cond(uint mask, bool value) {
    if (value) {
  void set_cond(uint mask, bool val) {
    if (val) {
      set(mask);
    } else {
      unset(mask);

@ -125,12 +125,12 @@ public:
  enum {
    chunk_bits = 10,
    pow_bits = 5,
    oop_bits = sizeof(uintptr_t)*8 - chunk_bits - pow_bits,
    oop_bits = sizeof(uintptr_t)*8 - chunk_bits - pow_bits
  };
  enum {
    oop_shift = 0,
    pow_shift = oop_shift + oop_bits,
    chunk_shift = pow_shift + pow_bits,
    chunk_shift = pow_shift + pow_bits
  };

public:

@ -73,7 +73,7 @@ public:
    _verify_marked_incomplete,

    // Objects should be marked in "complete" bitmap.
    _verify_marked_complete,
    _verify_marked_complete
  } VerifyMarked;

  typedef enum {
@ -84,7 +84,7 @@ public:
    _verify_forwarded_none,

    // Objects may have forwardees.
    _verify_forwarded_allow,
    _verify_forwarded_allow
  } VerifyForwarded;

  typedef enum {
@ -97,7 +97,7 @@ public:
    // May have references to cset, all should be forwarded.
    // Note: Allowing non-forwarded references to cset is equivalent
    // to _verify_cset_disable.
    _verify_cset_forwarded,
    _verify_cset_forwarded
  } VerifyCollectionSet;

  typedef enum {
@ -109,7 +109,7 @@ public:

    // All objects should belong to live regions,
    // and liveness data should be accurate
    _verify_liveness_complete,
    _verify_liveness_complete
  } VerifyLiveness;

  typedef enum {
@ -123,7 +123,7 @@ public:
    _verify_regions_nocset,

    // No trash and no cset regions allowed
    _verify_regions_notrash_nocset,
    _verify_regions_notrash_nocset
  } VerifyRegions;

  typedef enum {
@ -137,7 +137,7 @@ public:
    _verify_gcstate_forwarded,

    // Evacuation is in progress, some objects are forwarded
    _verify_gcstate_evacuation,
    _verify_gcstate_evacuation
  } VerifyGCState;

  struct VerifyOptions {

@ -30,7 +30,6 @@
#include "gc/z/zGranuleMap.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPhysicalMemory.hpp"
#include "utilities/macros.hpp"

// Expose some ZGC globals to the SA agent.
@ -77,20 +76,18 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
  nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
  volatile_nonstatic_field(ZPage, _top, uintptr_t) \
  \
  nonstatic_field(ZPageAllocator, _physical, ZPhysicalMemoryManager) \
  nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
  nonstatic_field(ZPageAllocator, _capacity, size_t) \
  nonstatic_field(ZPageAllocator, _used, size_t) \
  \
  nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
  \
  nonstatic_field(ZGranuleMapForPageTable, _map, ZPage** const) \
  \
  nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
  nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
  nonstatic_field(ZVirtualMemory, _start, const uintptr_t) \
  nonstatic_field(ZVirtualMemory, _end, const uintptr_t) \
  \
  nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding) \
  \
  nonstatic_field(ZPhysicalMemoryManager, _max_capacity, const size_t) \
  nonstatic_field(ZPhysicalMemoryManager, _capacity, size_t)
  nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding)

#define VM_INT_CONSTANTS_ZGC(declare_constant, declare_constant_with_value) \
  declare_constant(ZPhaseRelocate) \

@ -48,6 +48,7 @@ ZCollectedHeap::ZCollectedHeap() :
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

@ -77,6 +78,7 @@ void ZCollectedHeap::initialize_serviceability() {
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

@ -272,6 +274,7 @@ jlong ZCollectedHeap::millis_since_last_gc() {
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
@ -331,6 +334,8 @@ void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);

@ -29,10 +29,11 @@
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zDirector.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zRuntimeWorkers.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUncommitter.hpp"

class ZCollectedHeap : public CollectedHeap {
  friend class VMStructs;
@ -44,6 +45,7 @@ private:
  ZHeap _heap;
  ZDirector* _director;
  ZDriver* _driver;
  ZUncommitter* _uncommitter;
  ZStat* _stat;
  ZRuntimeWorkers _runtime_workers;

@ -22,6 +22,7 @@
 */

#include "precompiled.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zGlobals.hpp"
@ -45,6 +46,7 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
@ -62,7 +64,7 @@ ZHeap* ZHeap::_heap = NULL;
ZHeap::ZHeap() :
    _workers(),
    _object_allocator(_workers.nworkers()),
    _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
    _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
    _page_table(),
    _forwarding_table(),
    _mark(&_workers, &_page_table),
@ -81,13 +83,15 @@ ZHeap::ZHeap() :
}

size_t ZHeap::heap_min_size() const {
  const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
  return MIN2(aligned_min_size, heap_max_size());
  return MinHeapSize;
}

size_t ZHeap::heap_initial_size() const {
  return InitialHeapSize;
}

size_t ZHeap::heap_max_size() const {
  const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
  return MIN2(aligned_max_size, ZAddressOffsetMax);
  return MaxHeapSize;
}

size_t ZHeap::heap_max_reserve_size() const {
@ -102,7 +106,7 @@ bool ZHeap::is_initialized() const {
}

size_t ZHeap::min_capacity() const {
  return heap_min_size();
  return _page_allocator.min_capacity();
}

size_t ZHeap::max_capacity() const {
@ -250,10 +254,14 @@ void ZHeap::free_page(ZPage* page, bool reclaimed) {
  _page_allocator.free_page(page, reclaimed);
}

uint64_t ZHeap::uncommit(uint64_t delay) {
  return _page_allocator.uncommit(delay);
}

void ZHeap::before_flip() {
  if (ZVerifyViews) {
    // Unmap all pages
    _page_allocator.unmap_all_pages();
    _page_allocator.debug_unmap_all_pages();
  }
}

@ -262,8 +270,9 @@ void ZHeap::after_flip() {
    // Map all pages
    ZPageTableIterator iter(&_page_table);
    for (ZPage* page; iter.next(&page);) {
      _page_allocator.map_page(page);
      _page_allocator.debug_map_page(page);
    }
    _page_allocator.debug_map_cached_pages();
  }
}

@ -66,6 +66,7 @@ private:
  ZServiceability _serviceability;

  size_t heap_min_size() const;
  size_t heap_initial_size() const;
  size_t heap_max_size() const;
  size_t heap_max_reserve_size() const;

@ -129,6 +130,9 @@ public:
  void undo_alloc_page(ZPage* page);
  void free_page(ZPage* page, bool reclaimed);

  // Uncommit memory
  uint64_t uncommit(uint64_t delay);

  // Object allocation
  uintptr_t alloc_tlab(size_t size);
  uintptr_t alloc_object(size_t size);

@ -210,11 +210,11 @@ public:
template <typename T, bool forward>
class ZListIteratorImpl : public StackObj {
private:
  ZList<T>* const _list;
  T* _next;
  const ZList<T>* const _list;
  T* _next;

public:
  ZListIteratorImpl(ZList<T>* list);
  ZListIteratorImpl(const ZList<T>* list);

  bool next(T** elem);
};
@ -226,14 +226,14 @@ public:
template <typename T>
class ZListIterator : public ZListIteratorImpl<T, ZLIST_FORWARD> {
public:
  ZListIterator(ZList<T>* list) :
  ZListIterator(const ZList<T>* list) :
    ZListIteratorImpl<T, ZLIST_FORWARD>(list) {}
};

template <typename T>
class ZListReverseIterator : public ZListIteratorImpl<T, ZLIST_REVERSE> {
public:
  ZListReverseIterator(ZList<T>* list) :
  ZListReverseIterator(const ZList<T>* list) :
    ZListIteratorImpl<T, ZLIST_REVERSE>(list) {}
};

@ -27,7 +27,7 @@
#include "gc/z/zList.hpp"

template <typename T, bool forward>
ZListIteratorImpl<T, forward>::ZListIteratorImpl(ZList<T>* list) :
ZListIteratorImpl<T, forward>::ZListIteratorImpl(const ZList<T>* list) :
    _list(list),
    _next(forward ? list->first() : list->last()) {}
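
With the const-qualified overloads above, read-only traversal no longer needs a non-const list. A minimal sketch, reusing the iteration pattern from zHeap.cpp earlier in this diff (the function name is hypothetical):

  static size_t count_pages(const ZList<ZPage>* pages) {
    size_t n = 0;
    ZListIterator<ZPage> iter(pages);   // accepts const ZList<T>* after this change
    for (ZPage* page; iter.next(&page);) {
      n++;
    }
    return n;
  }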

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -34,15 +34,19 @@
static const ZStatCounter ZCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", ZStatUnitOpsPerSecond);

static size_t bitmap_size(uint32_t size, size_t nsegments) {
  // We need at least one bit per segment
  return MAX2<size_t>(size, nsegments) * 2;
}

ZLiveMap::ZLiveMap(uint32_t size) :
    _seqnum(0),
    _live_objects(0),
    _live_bytes(0),
    _segment_live_bits(0),
    _segment_claim_bits(0),
    // We need at least one bit per segment.
    _bitmap(MAX2<size_t>(size, nsegments) * 2),
    _shift(exact_log2(segment_size())) {}
    _bitmap(bitmap_size(size, nsegments)),
    _segment_shift(exact_log2(segment_size())) {}

void ZLiveMap::reset(size_t index) {
  const uint32_t seqnum_initializing = (uint32_t)-1;
@ -121,3 +125,11 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
  const bool success = set_segment_live_atomic(segment);
  assert(success, "Should never fail");
}

void ZLiveMap::resize(uint32_t size) {
  const size_t new_bitmap_size = bitmap_size(size, nsegments);
  if (_bitmap.size() != new_bitmap_size) {
    _bitmap.reinitialize(new_bitmap_size, false /* clear */);
    _segment_shift = exact_log2(segment_size());
  }
}
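
Factoring bitmap_size() out keeps the constructor and the new resize() in agreement. A worked instance of the arithmetic, assuming for illustration that the class constant nsegments == 64: with size == 4096 the helper returns MAX2(4096, 64) * 2 == 8192 bits, while a tiny size == 32 is clamped to MAX2(32, 64) * 2 == 128 bits, preserving the at-least-one-bit-per-segment invariant the comment calls out.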
Some files were not shown because too many files have changed in this diff.