Merge

commit b20fa155a6
@@ -112,7 +112,9 @@ static jmethodID setThreadIntegerRegisterSet_ID = 0;
   return;}

 static void throwNewDebuggerException(JNIEnv* env, const char* errMsg) {
-  env->ThrowNew(env->FindClass("sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
+  jclass clazz = env->FindClass("sun/jvm/hotspot/debugger/DebuggerException");
+  CHECK_EXCEPTION;
+  env->ThrowNew(clazz, errMsg);
 }

 /*
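The three-step form introduced above matters because env->FindClass() can fail and return NULL with an exception already pending; passing that NULL straight into ThrowNew() would crash the agent. A minimal standalone sketch of the same check-before-use idiom (the helper name and the explicit ExceptionCheck() call are illustrative assumptions; the real code uses the CHECK_EXCEPTION macro):

```cpp
#include <jni.h>

// Sketch of the check-before-use idiom for JNI lookups. CHECK_EXCEPTION in
// the diff expands to roughly this: bail out if the lookup failed, instead
// of passing a NULL jclass to ThrowNew().
static void throw_debugger_exception(JNIEnv* env, const char* msg) {
  jclass clazz = env->FindClass("sun/jvm/hotspot/debugger/DebuggerException");
  if (clazz == NULL || env->ExceptionCheck()) {
    // FindClass already raised NoClassDefFoundError (or OutOfMemoryError);
    // leave that exception pending rather than crashing in ThrowNew().
    return;
  }
  env->ThrowNew(clazz, msg);
}
```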
@@ -310,15 +312,18 @@ static bool getWindbgInterfaces(JNIEnv* env, jobject obj) {
 static bool setImageAndSymbolPath(JNIEnv* env, jobject obj) {
   jboolean isCopy;
   jclass clazz = env->GetObjectClass(obj);
+  CHECK_EXCEPTION_(false);
   jstring path;
   const char* buf;

   path = (jstring) env->GetStaticObjectField(clazz, imagePath_ID);
+  CHECK_EXCEPTION_(false);
   buf = env->GetStringUTFChars(path, &isCopy);
+  CHECK_EXCEPTION_(false);
   AutoJavaString imagePath(env, path, buf);

   path = (jstring) env->GetStaticObjectField(clazz, symbolPath_ID);
+  CHECK_EXCEPTION_(false);
   buf = env->GetStringUTFChars(path, &isCopy);
+  CHECK_EXCEPTION_(false);
   AutoJavaString symbolPath(env, path, buf);

@@ -43,8 +43,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
 // Mirror class for G1CollectedHeap.

 public class G1CollectedHeap extends SharedHeap {
-    // HeapRegionSeq _seq;
-    static private long hrsFieldOffset;
+    // HeapRegionManager _hrm;
+    static private long hrmFieldOffset;
     // MemRegion _g1_reserved;
     static private long g1ReservedFieldOffset;
     // size_t _summary_bytes_used;
@@ -67,7 +67,7 @@ public class G1CollectedHeap extends SharedHeap {
     static private synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("G1CollectedHeap");

-       hrsFieldOffset = type.getField("_hrs").getOffset();
+       hrmFieldOffset = type.getField("_hrm").getOffset();
        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
        g1mmField = type.getAddressField("_g1mm");
        oldSetFieldOffset = type.getField("_old_set").getOffset();
@@ -75,7 +75,7 @@ public class G1CollectedHeap extends SharedHeap {
     }

     public long capacity() {
-        return hrs().capacity();
+        return hrm().capacity();
     }

     public long used() {
@@ -83,13 +83,13 @@ public class G1CollectedHeap extends SharedHeap {
     }

     public long n_regions() {
-        return hrs().length();
+        return hrm().length();
     }

-    private HeapRegionSeq hrs() {
-        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
-        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
-                                                         hrsAddr);
+    private HeapRegionManager hrm() {
+        Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
+        return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
+                                                             hrmAddr);
     }

     public G1MonitoringSupport g1mm() {
@@ -110,7 +110,7 @@ public class G1CollectedHeap extends SharedHeap {
     }

     private Iterator<HeapRegion> heapRegionIterator() {
-        return hrs().heapRegionIterator();
+        return hrm().heapRegionIterator();
     }

     public void heapRegionIterate(SpaceClosure scl) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,9 +37,9 @@ import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;

-// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
+// Mirror class for HeapRegionManager.

-public class HeapRegionSeq extends VMObject {
+public class HeapRegionManager extends VMObject {
     // G1HeapRegionTable _regions
     static private long regionsFieldOffset;
     // uint _committed_length
@@ -54,7 +54,7 @@ public class HeapRegionSeq extends VMObject {
     }

     static private synchronized void initialize(TypeDataBase db) {
-        Type type = db.lookupType("HeapRegionSeq");
+        Type type = db.lookupType("HeapRegionManager");

         regionsFieldOffset = type.getField("_regions").getOffset();
         numCommittedField = type.getCIntegerField("_num_committed");
@@ -82,7 +82,7 @@ public class HeapRegionSeq extends VMObject {
         return regions().heapRegionIterator(length());
     }

-    public HeapRegionSeq(Address addr) {
+    public HeapRegionManager(Address addr) {
         super(addr);
     }
 }
@@ -721,6 +721,19 @@ copy_debug_jdk::
	  ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
	fi

+copy_optimized_jdk::
+	$(RM) -r $(JDK_IMAGE_DIR)/optimized
+	$(MKDIR) -p $(JDK_IMAGE_DIR)/optimized
+	if [ -d $(JDK_IMPORT_PATH)/optimized ] ; then \
+	  ($(CD) $(JDK_IMPORT_PATH)/optimized && \
+	    $(TAR) -cf - $(JDK_DIRS)) | \
+	  ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+	else \
+	  ($(CD) $(JDK_IMPORT_PATH) && \
+	    $(TAR) -cf - $(JDK_DIRS)) | \
+	  ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+	fi
+
 #
 # Check target
 #
@@ -42,6 +42,9 @@ jprt_build_debugEmb:
 jprt_build_fastdebugEmb:
	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug

+jprt_build_optimizedEmb:
+	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_optimized
+
 jprt_build_productOpen:
	$(MAKE) OPENJDK=true jprt_build_product

@@ -51,6 +54,9 @@ jprt_build_debugOpen:
 jprt_build_fastdebugOpen:
	$(MAKE) OPENJDK=true jprt_build_fastdebug

+jprt_build_optimizedOpen:
+	$(MAKE) OPENJDK=true jprt_build_optimized
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
	( $(CD) $(JDK_IMAGE_DIR) && \
	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
@@ -63,5 +69,9 @@ jprt_build_debug: all_debug copy_debug_jdk export_debug_jdk
	( $(CD) $(JDK_IMAGE_DIR)/debug && \
	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )

-.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug
+jprt_build_optimized: all_optimized copy_optimized_jdk export_optimized_jdk
+	( $(CD) $(JDK_IMAGE_DIR)/optimized && \
+	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
+
+.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug jprt_build_optimized

@@ -93,13 +93,13 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 # Standard list of jprt build targets for this source tree

 jprt.build.targets.standard= \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
+    ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
     ${jprt.my.solaris.x64}-{product|fastdebug}, \
     ${jprt.my.linux.i586}-{product|fastdebug}, \
-    ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
+    ${jprt.my.linux.x64}-{product|fastdebug}, \
     ${jprt.my.macosx.x64}-{product|fastdebug}, \
     ${jprt.my.windows.i586}-{product|fastdebug}, \
-    ${jprt.my.windows.x64}-{product|fastdebug|optimized}, \
+    ${jprt.my.windows.x64}-{product|fastdebug}, \
     ${jprt.my.linux.armvh}-{product|fastdebug}

 jprt.build.targets.open= \
@@ -604,6 +604,17 @@ void VM_Version::get_processor_features() {

 #if INCLUDE_RTM_OPT
   if (UseRTMLocking) {
+    if (is_intel_family_core()) {
+      if ((_model == CPU_MODEL_HASWELL_E3) ||
+          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
+          (_model == CPU_MODEL_BROADWELL && _stepping < 4)) {
+        if (!UnlockExperimentalVMOptions) {
+          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
+        } else {
+          warning("UseRTMLocking is only available as experimental option on this platform.");
+        }
+      }
+    }
     if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
       // RTM locking should be used only for applications with
       // high lock contention. For now we do not use it by default.
@@ -276,7 +276,10 @@ protected:
     CPU_MODEL_WESTMERE_EX    = 0x2f,
     CPU_MODEL_SANDYBRIDGE    = 0x2a,
     CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
-    CPU_MODEL_IVYBRIDGE_EP   = 0x3a
+    CPU_MODEL_IVYBRIDGE_EP   = 0x3a,
+    CPU_MODEL_HASWELL_E3     = 0x3c,
+    CPU_MODEL_HASWELL_E7     = 0x3f,
+    CPU_MODEL_BROADWELL      = 0x3d
   } cpuExtendedFamily;

   // cpuid information block. All info derived from executing cpuid with
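For context, the _model and _stepping values that gate UseRTMLocking above are decoded from CPUID leaf 1. A hypothetical standalone sketch (not HotSpot code) of how such a signature is taken apart:

```cpp
#include <cstdint>
#include <cstdio>

struct CpuSignature {
  uint32_t stepping;  // EAX[3:0]
  uint32_t model;     // EAX[7:4], extended by EAX[19:16] for family 6/15
  uint32_t family;    // EAX[11:8]
};

static CpuSignature decode_signature(uint32_t eax /* CPUID leaf 1, EAX */) {
  CpuSignature s;
  s.stepping = eax & 0xf;
  s.family   = (eax >> 8) & 0xf;
  s.model    = (eax >> 4) & 0xf;
  if (s.family == 0x6 || s.family == 0xf) {
    // The extended model bits are prepended for families 6 and 15.
    s.model += ((eax >> 16) & 0xf) << 4;
  }
  return s;
}

int main() {
  // 0x306c3 is a Haswell signature (family 6, model 0x3c, stepping 3).
  CpuSignature s = decode_signature(0x306c3);
  printf("family=0x%x model=0x%x stepping=0x%x\n", s.family, s.model, s.stepping);
  return 0;
}
```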
@@ -1644,8 +1644,20 @@ static bool _print_ascii_file(const char* filename, outputStream* st) {
   return true;
 }

+int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
+  outputStream * out = (outputStream *) param;
+  out->print_cr(PTR_FORMAT " \t%s", base_address, name);
+  return 0;
+}
+
 void os::print_dll_info(outputStream *st) {
   st->print_cr("Dynamic libraries:");
+  if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
+    st->print_cr("Error: Cannot print dynamic libraries.");
+  }
+}
+
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
 #ifdef RTLD_DI_LINKMAP
   Dl_info dli;
   void *handle;
@@ -1654,36 +1666,41 @@ void os::print_dll_info(outputStream *st) {

   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
       dli.dli_fname == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    return 1;
   }
   handle = dlopen(dli.dli_fname, RTLD_LAZY);
   if (handle == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    return 1;
   }
   dlinfo(handle, RTLD_DI_LINKMAP, &map);
   if (map == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    dlclose(handle);
+    return 1;
   }

   while (map->l_prev != NULL)
     map = map->l_prev;

   while (map != NULL) {
-    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
+    // Value for top_address is returned as 0 since we don't have any information about module size
+    if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
+      dlclose(handle);
+      return 1;
+    }
     map = map->l_next;
   }

   dlclose(handle);
 #elif defined(__APPLE__)
   for (uint32_t i = 1; i < _dyld_image_count(); i++) {
-    st->print_cr(PTR_FORMAT " \t%s", _dyld_get_image_header(i),
-                 _dyld_get_image_name(i));
+    // Value for top_address is returned as 0 since we don't have any information about module size
+    if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
+      return 1;
+    }
   }
+  return 0;
 #else
-  st->print_cr("Error: Cannot print dynamic libraries.");
+  return 1;
 #endif
 }

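The hunks above replace a print-only walk with a reusable enumerator that reports each loaded module to a callback and aborts when the callback returns non-zero. A sketch of that pattern under the same assumption (the typedef below is inferred from the callback signatures visible in this diff, and `address` stands in for HotSpot's byte-pointer type):

```cpp
#include <cstdio>

typedef unsigned char* address;

// Inferred shape of os::LoadedModulesCallbackFunc: return non-zero from the
// callback to abort the enumeration early.
typedef int (*LoadedModulesCallbackFunc)(const char* name, address base,
                                         address top, void* param);

// A counting callback: shows how caller state travels through `param`.
static int count_modules_cb(const char* name, address base, address top,
                            void* param) {
  (void)name; (void)base; (void)top;
  ++*(int*)param;
  return 0;  // keep enumerating
}

// A caller of int os::get_loaded_modules_info(callback, param) would then do:
//   int n = 0;
//   if (os::get_loaded_modules_info(count_modules_cb, &n) == 0) {
//     printf("%d modules loaded\n", n);
//   }
```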
@@ -2120,6 +2120,40 @@ void os::print_dll_info(outputStream *st) {
   }
 }

+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  FILE *procmapsFile = NULL;
+
+  // Open the procfs maps file for the current process
+  if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) {
+    // Allocate PATH_MAX for file name plus a reasonable size for other fields.
+    char line[PATH_MAX + 100];
+
+    // Read line by line from 'file'
+    while (fgets(line, sizeof(line), procmapsFile) != NULL) {
+      u8 base, top, offset, inode;
+      char permissions[5];
+      char device[6];
+      char name[PATH_MAX + 1];
+
+      // Parse fields from line
+      sscanf(line, "%lx-%lx %4s %lx %5s %ld %s", &base, &top, permissions, &offset, device, &inode, name);
+
+      // Filter by device id '00:00' so that we only get file system mapped files.
+      if (strcmp(device, "00:00") != 0) {
+
+        // Call callback with the fields of interest
+        if(callback(name, (address)base, (address)top, param)) {
+          // Oops abort, callback aborted
+          fclose(procmapsFile);
+          return 1;
+        }
+      }
+    }
+    fclose(procmapsFile);
+  }
+  return 0;
+}
+
 void os::print_os_info_brief(outputStream* st) {
   os::Linux::print_distro_info(st);

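A standalone sketch of the /proc/self/maps parsing added above, assuming the usual field layout of that file (this version uses %llx/%llu so the conversions match 64-bit variables on every platform, where the %lx in the diff relies on `long` being 64-bit):

```cpp
#include <climits>
#include <cstdio>
#include <cstring>

int main() {
  // Each maps line looks like:
  //   <base>-<top> <perms> <offset> <device> <inode> <path>
  const char* line =
      "7f3c0a2e4000-7f3c0a4a0000 r-xp 00000000 08:01 1048602 /usr/lib/libc.so.6";

  unsigned long long base, top, offset, inode;
  char permissions[5];  // e.g. "r-xp" plus NUL
  char device[6];       // e.g. "08:01" plus NUL
  char name[PATH_MAX + 1] = "";

  // Same field layout as the sscanf in the diff.
  if (sscanf(line, "%llx-%llx %4s %llx %5s %llu %s",
             &base, &top, permissions, &offset, device, &inode, name) == 7 &&
      strcmp(device, "00:00") != 0) {
    // Anonymous mappings carry device 00:00; everything else is file-backed.
    printf("module %s @ [%llx, %llx)\n", name, base, top);
  }
  return 0;
}
```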
@@ -1722,41 +1722,54 @@ bool os::dll_address_to_library_name(address addr, char* buf,
   return false;
 }

-// Prints the names and full paths of all opened dynamic libraries
-// for current process
-void os::print_dll_info(outputStream * st) {
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
   Dl_info dli;
-  void *handle;
-  Link_map *map;
   Link_map *p;

-  st->print_cr("Dynamic libraries:"); st->flush();
-
-  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
+  // Sanity check?
+  if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
       dli.dli_fname == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    return 1;
   }
-  handle = dlopen(dli.dli_fname, RTLD_LAZY);
+
+  void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
   if (handle == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    return 1;
   }
+
+  Link_map *map;
   dlinfo(handle, RTLD_DI_LINKMAP, &map);
   if (map == NULL) {
-    st->print_cr("Error: Cannot print dynamic libraries.");
-    return;
+    dlclose(handle);
+    return 1;
   }

-  while (map->l_prev != NULL)
+  while (map->l_prev != NULL) {
     map = map->l_prev;
+  }

   while (map != NULL) {
-    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
+    // Iterate through all map entries and call callback with fields of interest
+    if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
+      dlclose(handle);
+      return 1;
+    }
     map = map->l_next;
   }

   dlclose(handle);
+  return 0;
+}
+
+int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
+  outputStream * out = (outputStream *) param;
+  out->print_cr(PTR_FORMAT " \t%s", base_address, name);
+  return 0;
+}
+
+void os::print_dll_info(outputStream * st) {
+  st->print_cr("Dynamic libraries:"); st->flush();
+  if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
+    st->print_cr("Error: Cannot print dynamic libraries.");
+  }
 }

 // Loads .dll/.so and
@@ -1301,120 +1301,6 @@ static bool _addr_in_ntdll( address addr )
 }
 #endif

-
-// Enumerate all modules for a given process ID
-//
-// Notice that Windows 95/98/Me and Windows NT/2000/XP have
-// different API for doing this. We use PSAPI.DLL on NT based
-// Windows and ToolHelp on 95/98/Me.
-
-// Callback function that is called by enumerate_modules() on
-// every DLL module.
-// Input parameters:
-//    int       pid,
-//    char*     module_file_name,
-//    address   module_base_addr,
-//    unsigned  module_size,
-//    void*     param
-typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);
-
-// enumerate_modules for Windows NT, using PSAPI
-static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
-{
-  HANDLE   hProcess;
-
-# define MAX_NUM_MODULES 128
-  HMODULE     modules[MAX_NUM_MODULES];
-  static char filename[MAX_PATH];
-  int         result = 0;
-
-  if (!os::PSApiDll::PSApiAvailable()) {
-    return 0;
-  }
-
-  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
-                         FALSE, pid);
-  if (hProcess == NULL) return 0;
-
-  DWORD size_needed;
-  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
-                                        sizeof(modules), &size_needed)) {
-    CloseHandle(hProcess);
-    return 0;
-  }
-
-  // number of modules that are currently loaded
-  int num_modules = size_needed / sizeof(HMODULE);
-
-  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
-    // Get Full pathname:
-    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
-                                           filename, sizeof(filename))) {
-      filename[0] = '\0';
-    }
-
-    MODULEINFO modinfo;
-    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
-                                            &modinfo, sizeof(modinfo))) {
-      modinfo.lpBaseOfDll = NULL;
-      modinfo.SizeOfImage = 0;
-    }
-
-    // Invoke callback function
-    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
-                  modinfo.SizeOfImage, param);
-    if (result) break;
-  }
-
-  CloseHandle(hProcess);
-  return result;
-}
-
-
-// enumerate_modules for Windows 95/98/ME, using TOOLHELP
-static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
-{
-  HANDLE                hSnapShot;
-  static MODULEENTRY32  modentry;
-  int                   result = 0;
-
-  if (!os::Kernel32Dll::HelpToolsAvailable()) {
-    return 0;
-  }
-
-  // Get a handle to a Toolhelp snapshot of the system
-  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid);
-  if (hSnapShot == INVALID_HANDLE_VALUE) {
-    return FALSE;
-  }
-
-  // iterate through all modules
-  modentry.dwSize = sizeof(MODULEENTRY32);
-  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;
-
-  while (not_done) {
-    // invoke the callback
-    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
-                modentry.modBaseSize, param);
-    if (result) break;
-
-    modentry.dwSize = sizeof(MODULEENTRY32);
-    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
-  }
-
-  CloseHandle(hSnapShot);
-  return result;
-}
-
-int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
-{
-  // Get current process ID if caller doesn't provide it.
-  if (!pid) pid = os::current_process_id();
-
-  if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
-  else                    return _enumerate_modules_windows(pid, func, param);
-}
-
 struct _modinfo {
   address addr;
   char*   full_path;   // point to a char buffer
@@ -1422,13 +1308,13 @@ struct _modinfo {
   address base_addr;
 };

-static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
-                                  unsigned size, void * param) {
+static int _locate_module_by_addr(const char * mod_fname, address base_addr,
+                                  address top_address, void * param) {
   struct _modinfo *pmod = (struct _modinfo *)param;
   if (!pmod) return -1;

-  if (base_addr <= pmod->addr &&
-      base_addr+size > pmod->addr) {
+  if (base_addr <= pmod->addr &&
+      top_address > pmod->addr) {
     // if a buffer is provided, copy path name to the buffer
     if (pmod->full_path) {
       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
@@ -1453,8 +1339,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
   mi.addr      = addr;
   mi.full_path = buf;
   mi.buflen    = buflen;
-  int pid = os::current_process_id();
-  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
+  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
     // buf already contains path name
     if (offset) *offset = addr - mi.base_addr;
     return true;
@@ -1479,14 +1364,14 @@ bool os::dll_address_to_function_name(address addr, char *buf,
 }

 // save the start and end address of jvm.dll into param[0] and param[1]
-static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
-                           unsigned size, void * param) {
+static int _locate_jvm_dll(const char* mod_fname, address base_addr,
+                           address top_address, void * param) {
   if (!param) return -1;

-  if (base_addr <= (address)_locate_jvm_dll &&
-      base_addr+size > (address)_locate_jvm_dll) {
+  if (base_addr <= (address)_locate_jvm_dll &&
+      top_address > (address)_locate_jvm_dll) {
     ((address*)param)[0] = base_addr;
-    ((address*)param)[1] = base_addr + size;
+    ((address*)param)[1] = top_address;
     return 1;
   }
   return 0;
@@ -1497,8 +1382,7 @@ address vm_lib_location[2];    // start and end address of jvm.dll
 // check if addr is inside jvm.dll
 bool os::address_is_in_vm(address addr) {
   if (!vm_lib_location[0] || !vm_lib_location[1]) {
-    int pid = os::current_process_id();
-    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
+    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
       assert(false, "Can't find jvm module.");
       return false;
     }
@@ -1508,14 +1392,13 @@ bool os::address_is_in_vm(address addr) {
 }

 // print module info; param is outputStream*
-static int _print_module(int pid, char* fname, address base,
-                         unsigned size, void* param) {
+static int _print_module(const char* fname, address base_address,
+                         address top_address, void* param) {
   if (!param) return -1;

   outputStream* st = (outputStream*)param;

-  address end_addr = base + size;
-  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
+  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
   return 0;
 }

@@ -1644,11 +1527,60 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen)
   return NULL;
 }

-
 void os::print_dll_info(outputStream *st) {
-  int pid = os::current_process_id();
   st->print_cr("Dynamic libraries:");
-  enumerate_modules(pid, _print_module, (void *)st);
+  get_loaded_modules_info(_print_module, (void *)st);
+}
+
+int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
+  HANDLE   hProcess;
+
+# define MAX_NUM_MODULES 128
+  HMODULE     modules[MAX_NUM_MODULES];
+  static char filename[MAX_PATH];
+  int         result = 0;
+
+  if (!os::PSApiDll::PSApiAvailable()) {
+    return 0;
+  }
+
+  int pid = os::current_process_id();
+  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
+                         FALSE, pid);
+  if (hProcess == NULL) return 0;
+
+  DWORD size_needed;
+  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
+                                        sizeof(modules), &size_needed)) {
+    CloseHandle(hProcess);
+    return 0;
+  }
+
+  // number of modules that are currently loaded
+  int num_modules = size_needed / sizeof(HMODULE);
+
+  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
+    // Get Full pathname:
+    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
+                                           filename, sizeof(filename))) {
+      filename[0] = '\0';
+    }
+
+    MODULEINFO modinfo;
+    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
+                                            &modinfo, sizeof(modinfo))) {
+      modinfo.lpBaseOfDll = NULL;
+      modinfo.SizeOfImage = 0;
+    }
+
+    // Invoke callback function
+    result = callback(filename, (address)modinfo.lpBaseOfDll,
+                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
+    if (result) break;
+  }
+
+  CloseHandle(hProcess);
+  return result;
+}

 void os::print_os_info_brief(outputStream* st) {
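The _locate_module_by_addr and _locate_jvm_dll rewrites above share one shape: a callback that returns 1 to abort the walk as soon as the module covering a probe address is found. A simplified sketch of that lookup pattern (the types and buffer size are illustrative stand-ins, not the HotSpot definitions):

```cpp
#include <cstdio>

typedef unsigned char* address;
typedef int (*LoadedModulesCallbackFunc)(const char*, address, address, void*);

struct ModuleQuery {
  address probe;      // address we want to attribute to a module
  char    path[260];  // filled in on a hit
  address base;
};

static int locate_by_addr_cb(const char* name, address base, address top,
                             void* param) {
  ModuleQuery* q = (ModuleQuery*)param;
  if (base <= q->probe && q->probe < top) {
    snprintf(q->path, sizeof(q->path), "%s", name);
    q->base = base;
    return 1;  // hit: abort the enumeration, like _locate_module_by_addr
  }
  return 0;    // keep scanning
}
```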
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@ void ClassFileStream::truncated_file_error(TRAPS) {
   THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
 }

-ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
+ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
   _buffer_start = buffer;
   _buffer_end   = buffer + length;
   _current      = buffer;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,20 +39,20 @@ class ClassFileStream: public ResourceObj {
   u1*   _buffer_start; // Buffer bottom
   u1*   _buffer_end;   // Buffer top (one past last element)
   u1*   _current;      // Current buffer position
-  char* _source;       // Source of stream (directory name, ZIP/JAR archive name)
+  const char* _source; // Source of stream (directory name, ZIP/JAR archive name)
   bool  _need_verify;  // True if verification is on for the class file

   void truncated_file_error(TRAPS);
 public:
   // Constructor
-  ClassFileStream(u1* buffer, int length, char* source);
+  ClassFileStream(u1* buffer, int length, const char* source);

   // Buffer access
   u1* buffer() const           { return _buffer_start; }
   int length() const           { return _buffer_end - _buffer_start; }
   u1* current() const          { return _current; }
   void set_current(u1* pos)    { _current = pos; }
-  char* source() const         { return _source; }
+  const char* source() const   { return _source; }
   void set_verify(bool flag)   { _need_verify = flag; }

   void check_truncated_file(bool b, TRAPS) {
@@ -189,9 +189,10 @@ bool ClassPathEntry::is_lazy() {
   return false;
 }

-ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
-  _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
-  strcpy(_dir, dir);
+ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() {
+  char* copy = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
+  strcpy(copy, dir);
+  _dir = copy;
 }


@@ -235,8 +236,9 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {

 ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
   _zip = zip;
-  _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
-  strcpy(_zip_name, zip_name);
+  char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
+  strcpy(copy, zip_name);
+  _zip_name = copy;
 }

 ClassPathZipEntry::~ClassPathZipEntry() {
@@ -304,7 +306,7 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
   }
 }

-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
   _path = os::strdup_check_oom(path);
   _st = *st;
   _meta_index = NULL;
@@ -314,7 +316,7 @@ LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool t
 }

 LazyClassPathEntry::~LazyClassPathEntry() {
-  os::free(_path);
+  os::free((void*)_path);
 }

 bool LazyClassPathEntry::is_jar_file() {
@@ -563,17 +565,19 @@ void ClassLoader::check_shared_classpath(const char *path) {

 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
-  char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
-  if (!PrintSharedArchiveAndExit) {
+  const char* sys_class_path = Arguments::get_sysclasspath();
+  if (PrintSharedArchiveAndExit) {
+    // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
+    // the same as the bootcp of the shared archive.
+  } else {
     trace_class_path("[Bootstrap loader class path=", sys_class_path);
   }
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
-    _shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
+    _shared_paths_misc_info->add_boot_classpath(sys_class_path);
   }
 #endif
   setup_search_path(sys_class_path);
-  os::free(sys_class_path);
 }

 #if INCLUDE_CDS
@@ -593,7 +597,7 @@ bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
 }
 #endif

-void ClassLoader::setup_search_path(char *class_path) {
+void ClassLoader::setup_search_path(const char *class_path) {
   int offset = 0;
   int len = (int)strlen(class_path);
   int end = 0;
@@ -620,7 +624,7 @@ void ClassLoader::setup_search_path(char *class_path) {
   }
 }

-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
+ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st,
                                                      bool lazy, bool throw_exception, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
@@ -687,11 +691,8 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path) {
   struct stat st;
   if (os::stat(path, &st) == 0) {
     if ((st.st_mode & S_IFREG) == S_IFREG) {
-      char orig_path[JVM_MAXPATHLEN];
       char canonical_path[JVM_MAXPATHLEN];
-
-      strcpy(orig_path, path);
-      if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
+      if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
         char* error_msg = NULL;
         jzfile* zip;
         {
@@ -737,7 +738,7 @@ void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
 }

 // Returns true IFF the file/dir exists and the entry was successfully created.
-bool ClassLoader::update_class_path_entry_list(char *path,
+bool ClassLoader::update_class_path_entry_list(const char *path,
                                                bool check_for_duplicates,
                                                bool throw_exception) {
   struct stat st;
@@ -762,8 +763,8 @@ bool ClassLoader::update_class_path_entry_list(char *path,
       if (DumpSharedSpaces) {
         _shared_paths_misc_info->add_nonexist_path(path);
       }
-      return false;
 #endif
+      return false;
     }
   }

@@ -1269,11 +1270,17 @@ void classLoader_init() {
 }


-bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
+bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) {
   assert(orig != NULL && out != NULL && len > 0, "bad arguments");
   if (CanonicalizeEntry != NULL) {
-    JNIEnv* env = JavaThread::current()->jni_environment();
-    if ((CanonicalizeEntry)(env, os::native_path(orig), out, len) < 0) {
+    JavaThread* THREAD = JavaThread::current();
+    JNIEnv* env = THREAD->jni_environment();
+    ResourceMark rm(THREAD);
+
+    // os::native_path writes into orig_copy
+    char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1);
+    strcpy(orig_copy, orig);
+
+    if ((CanonicalizeEntry)(env, os::native_path(orig_copy), out, len) < 0) {
       return false;
     }
   } else {
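The copy made before calling os::native_path() above is the key point of this hunk: once the parameter becomes const char*, any helper that rewrites its argument in place needs a scratch copy. A generic sketch of that defensive-copy idiom (the in-place normalizer below is a hypothetical stand-in for os::native_path, and plain malloc replaces HotSpot's resource-area allocation):

```cpp
#include <cstdlib>
#include <cstring>

// Hypothetical in-place normalizer, standing in for os::native_path().
static char* native_path_inplace(char* p) {
  for (char* c = p; *c != '\0'; c++) {
    if (*c == '/') *c = '\\';  // e.g. flip separators on Windows
  }
  return p;
}

static bool canonicalize(const char* orig, char* out, size_t len) {
  if (orig == NULL || out == NULL || len == 0) return false;
  // orig is const: copy it before handing it to the mutating helper.
  char* copy = (char*)malloc(strlen(orig) + 1);
  if (copy == NULL) return false;
  strcpy(copy, orig);
  native_path_inplace(copy);
  // Real canonicalization would go here; this sketch just copies out.
  strncpy(out, copy, len - 1);
  out[len - 1] = '\0';
  free(copy);
  return true;
}
```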
@@ -72,11 +72,11 @@ class ClassPathEntry: public CHeapObj<mtClass> {

 class ClassPathDirEntry: public ClassPathEntry {
  private:
-  char* _dir;           // Name of directory
+  const char* _dir;     // Name of directory
  public:
   bool is_jar_file()       { return false;  }
   const char* name()       { return _dir; }
-  ClassPathDirEntry(char* dir);
+  ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -100,8 +100,8 @@ typedef struct {

 class ClassPathZipEntry: public ClassPathEntry {
  private:
-  jzfile* _zip;        // The zip archive
-  char*   _zip_name;   // Name of zip archive
+  jzfile* _zip;              // The zip archive
+  const char*   _zip_name;   // Name of zip archive
  public:
   bool is_jar_file()       { return true;  }
   const char* name()       { return _zip_name; }
@@ -119,7 +119,7 @@ class ClassPathZipEntry: public ClassPathEntry {
 // For lazier loading of boot class path entries
 class LazyClassPathEntry: public ClassPathEntry {
  private:
-  char* _path; // dir or file
+  const char* _path; // dir or file
   struct stat _st;
   MetaIndex* _meta_index;
   bool _has_error;
@@ -129,7 +129,7 @@ class LazyClassPathEntry: public ClassPathEntry {
  public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
+  LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception);
   virtual ~LazyClassPathEntry();
   u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);

@@ -216,17 +216,17 @@ class ClassLoader: AllStatic {
   static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
                                int start_index);
   static void setup_bootstrap_search_path();
-  static void setup_search_path(char *class_path);
+  static void setup_search_path(const char *class_path);

   static void load_zip_library();
-  static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
+  static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool lazy, bool throw_exception, TRAPS);

   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
-  static bool get_canonical_path(char* orig, char* out, int len);
+  static bool get_canonical_path(const char* orig, char* out, int len);
  public:
-  static bool update_class_path_entry_list(char *path,
+  static bool update_class_path_entry_list(const char *path,
                                            bool check_for_duplicates,
                                            bool throw_exception=true);
   static void print_bootclasspath();
@@ -776,7 +776,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
   // mark metadata seen on the stack and code cache so we can delete
   // unneeded entries.
   bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
-  MetadataOnStackMark md_on_stack;
+  MetadataOnStackMark md_on_stack(has_redefined_a_class);
   if (has_redefined_a_class) {
     // purge_previous_versions also cleans weak method links. Because
     // one method's MDO can reference another method from another
@@ -59,8 +59,8 @@ public:
   };


-  static void add_class_path_entry(char* path, bool check_for_duplicates,
-                                   ClassPathEntry* new_entry) {
+  static void add_class_path_entry(const char* path, bool check_for_duplicates,
+                                   ClassPathEntry* new_entry) {
     ClassLoader::add_to_list(new_entry);
   }
   static void setup_search_paths() {}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@ NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)
 // Walk metadata on the stack and mark it so that redefinition doesn't delete
 // it. Class unloading also walks the previous versions and might try to
 // delete it, so this class is used by class unloading also.
-MetadataOnStackMark::MetadataOnStackMark() {
+MetadataOnStackMark::MetadataOnStackMark(bool has_redefined_a_class) {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
   NOT_PRODUCT(_is_active = true;)
   if (_marked_objects == NULL) {
@@ -49,7 +49,7 @@ MetadataOnStackMark::MetadataOnStackMark() {
   }

   Threads::metadata_do(Metadata::mark_on_stack);
-  if (JvmtiExport::has_redefined_a_class()) {
+  if (has_redefined_a_class) {
     CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   }
   CompileBroker::mark_on_stack();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@ class Metadata;
 class MetadataOnStackMark : public StackObj {
   NOT_PRODUCT(static bool _is_active;)
  public:
-  MetadataOnStackMark();
+  MetadataOnStackMark(bool has_redefined_a_class);
   ~MetadataOnStackMark();
   static void record(Metadata* m);
 };
@@ -139,7 +139,7 @@ bool SharedPathsMiscInfo::check(jint type, const char* path) {
       if (timestamp != st.st_mtime) {
         return fail("Timestamp mismatch");
       }
-      if (filesize != st.st_size) {
+      if (filesize != st.st_size) {
         return fail("File size mismatch");
       }
     }
@@ -165,7 +165,7 @@ public:
       out->print("Expecting that %s does not exist", path);
       break;
     case REQUIRED:
-      out->print("Expecting that file %s must exist and not altered", path);
+      out->print("Expecting that file %s must exist and is not altered", path);
       break;
     default:
       ShouldNotReachHere();
@@ -1243,7 +1243,6 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
       tty->print_cr("]");
     }

-#if INCLUDE_CDS
     if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
       // Only dump the classes that can be stored into CDS archive
       if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
@@ -1252,7 +1251,6 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
         classlist_file->flush();
       }
     }
-#endif

     // notify a class loaded from shared object
     ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
@@ -1260,7 +1258,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
   }
   return ik;
 }
-#endif
+#endif // INCLUDE_CDS

 instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@@ -2062,7 +2062,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
                "metadata must be found in exactly one place");
         if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
           Metadata* md = r->metadata_value();
-          f(md);
+          if (md != _method) f(md);
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
@@ -2087,9 +2087,6 @@ void nmethod::metadata_do(void f(Metadata*)) {
     Metadata* md = *p;
     f(md);
   }
-
-  // Call function Method*, not embedded in these other places.
-  if (_method != NULL) f(_method);
 }

 void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
@@ -328,9 +328,11 @@ AdaptiveSizePolicy* CMSCollector::size_policy() {
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

   const char* gen_name = "old";
+  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

   // Generation Counters - generation 1, 1 subspace
-  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+  _gen_counters = new GenerationCounters(gen_name, 1, 1,
+      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

   _space_counters = new GSpaceCounters(gen_name, 0,
                                        _virtual_space.reserved_size(),
@@ -34,8 +34,8 @@
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
@@ -434,10 +434,6 @@ void CMMarkStack::oops_do(OopClosure* f) {
   }
 }

-bool ConcurrentMark::not_yet_marked(oop obj) const {
-  return _g1h->is_obj_ill(obj);
-}
-
 CMRootRegions::CMRootRegions() :
   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
   _should_abort(false),  _next_survivor(NULL) { }
@@ -892,7 +888,16 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
   }

   virtual bool doHeapRegion(HeapRegion* r) {
-    return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
+    // This closure can be called concurrently to the mutator, so we must make sure
+    // that the result of the getNextMarkedWordAddress() call is compared to the
+    // value passed to it as limit to detect any found bits.
+    // We can use the region's orig_end() for the limit and the comparison value
+    // as it always contains the "real" end of the region that never changes and
+    // has no side effects.
+    // Due to the latter, there can also be no problem with the compiler generating
+    // reloads of the orig_end() call.
+    HeapWord* end = r->orig_end();
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
   }
 };

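A simplified sketch (hypothetical toy types, not the HotSpot classes) of the race the new comment describes: when the limit can change concurrently, it has to be read once and reused both as the search bound and in the "nothing found" comparison:

```cpp
#include <cstdint>

typedef uintptr_t HeapWord;

struct Bitmap {
  HeapWord first_marked;  // toy stand-in for real bitmap state
  // Returns the first marked word in [start, limit), or limit if none.
  HeapWord getNextMarkedWordAddress(HeapWord start, HeapWord limit) const {
    return (first_marked >= start && first_marked < limit) ? first_marked
                                                           : limit;
  }
};

struct Region {
  HeapWord bottom_, end_, orig_end_;
  HeapWord bottom() const   { return bottom_; }
  HeapWord end() const      { return end_; }       // may move concurrently
  HeapWord orig_end() const { return orig_end_; }  // never changes
};

// Racy form:  bm.getNextMarkedWordAddress(r.bottom(), r.end()) != r.end()
// -- end() can be reloaded between the call and the comparison, so a "none
// found" result may be compared against a different limit than was searched.
// Safe form: take a single stable read and use it on both sides.
bool region_has_marks(const Region& r, const Bitmap& bm) {
  HeapWord end = r.orig_end();  // single stable read
  return bm.getNextMarkedWordAddress(r.bottom(), end) != end;
}
```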
@@ -1117,20 +1122,17 @@ public:
     if (!_cm->has_aborted()) {
       do {
         double start_vtime_sec = os::elapsedVTime();
-        double start_time_sec = os::elapsedTime();
         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

         the_task->do_marking_step(mark_step_duration_ms,
                                   true  /* do_termination */,
                                   false /* is_serial*/);

-        double end_time_sec = os::elapsedTime();
         double end_vtime_sec = os::elapsedVTime();
         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
-        double elapsed_time_sec = end_time_sec - start_time_sec;
         _cm->clear_has_overflown();

-        bool ret = _cm->do_yield_check(worker_id);
+        _cm->do_yield_check(worker_id);

         jlong sleep_time_ms;
         if (!_cm->has_aborted() && the_task->has_aborted()) {
@@ -1140,17 +1142,6 @@ public:
           os::sleep(Thread::current(), sleep_time_ms, false);
           SuspendibleThreadSet::join();
         }
-        double end_time2_sec = os::elapsedTime();
-        double elapsed_time2_sec = end_time2_sec - start_time_sec;
-
-#if 0
-        gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
-                               "overhead %1.4lf",
-                               elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
-                               the_task->conc_overhead(os::elapsedTime()) * 8.0);
-        gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
-                               elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
-#endif
       } while (!_cm->has_aborted() && the_task->has_aborted());
     }
     the_task->record_end_time();
@@ -1409,7 +1400,7 @@ protected:
   void set_bit_for_region(HeapRegion* hr) {
     assert(!hr->continuesHumongous(), "should have filtered those out");

-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
     if (!hr->startsHumongous()) {
       // Normal (non-humongous) case: just set the bit.
       _region_bm->par_at_put(index, true);
@@ -1597,7 +1588,7 @@ public:
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
-                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
       }
       failures += 1;
     }
@@ -1606,7 +1597,7 @@ public:
     // (which was just calculated) region bit maps.
     // We're not OK if the bit in the calculated expected region
     // bitmap is set and the bit in the actual region bitmap is not.
-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

     bool expected = _exp_region_bm->at(index);
     bool actual = _region_bm->at(index);
@@ -1614,7 +1605,7 @@ public:
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                                "expected: %s, actual: %s",
-                               hr->hrs_index(),
+                               hr->hrm_index(),
                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
       }
       failures += 1;
@@ -1635,7 +1626,7 @@ public:
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                "expected: %s, actual: %s",
-                               hr->hrs_index(), i,
+                               hr->hrm_index(), i,
                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
       }
       failures += 1;
@@ -2949,11 +2940,6 @@ void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
   _nextMarkBitMap->clearRange(mr);
 }

-void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
-  clearRangePrevBitmap(mr);
-  clearRangeNextBitmap(mr);
-}
-
 HeapRegion*
 ConcurrentMark::claim_region(uint worker_id) {
   // "checkpoint" the finger
@@ -3256,7 +3242,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
     assert(limit_idx <= end_idx, "or else use atomics");

     // Aggregate the "stripe" in the count data associated with hr.
-    uint hrs_index = hr->hrs_index();
+    uint hrm_index = hr->hrm_index();
     size_t marked_bytes = 0;

     for (uint i = 0; i < _max_worker_id; i += 1) {
@@ -3265,7 +3251,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {

       // Fetch the marked_bytes in this region for task i and
       // add it to the running total for this region.
-      marked_bytes += marked_bytes_array[hrs_index];
+      marked_bytes += marked_bytes_array[hrm_index];

       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
       // into the global card bitmap.
@@ -3499,17 +3485,6 @@ bool ConcurrentMark::do_yield_check(uint worker_id) {
   }
 }

-bool ConcurrentMark::containing_card_is_marked(void* p) {
-  size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
-  return _card_bm.at(offset >> CardTableModRefBS::card_shift);
-}
-
-bool ConcurrentMark::containing_cards_are_marked(void* start,
-                                                 void* last) {
-  return containing_card_is_marked(start) &&
-         containing_card_is_marked(last);
-}
-
 #ifndef PRODUCT
 // for debugging purposes
 void ConcurrentMark::print_finger() {
@@ -3762,7 +3737,7 @@ void CMTask::regular_clock_call() {

   if (_cm->verbose_medium()) {
     gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
-                           "scanned = %d%s, refs reached = %d%s",
+                           "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
                            _worker_id, last_interval_ms,
                            _words_scanned,
                            (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
@@ -683,7 +683,9 @@ public:
     return _task_queues->steal(worker_id, hash_seed, obj);
   }

-  ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
+  ConcurrentMark(G1CollectedHeap* g1h,
+                 G1RegionToSpaceMapper* prev_bitmap_storage,
+                 G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();

   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -712,8 +714,10 @@ public:
   // inconsistent) and always passing the size. hr is the region that
   // contains the object and it's passed optionally from callers who
   // might already have it (no point in recalculating it).
-  inline void grayRoot(oop obj, size_t word_size,
-                       uint worker_id, HeapRegion* hr = NULL);
+  inline void grayRoot(oop obj,
+                       size_t word_size,
+                       uint worker_id,
+                       HeapRegion* hr = NULL);

   // It iterates over the heap and for each object it comes across it
   // will dump the contents of its reference fields, as well as
@@ -734,7 +738,8 @@ public:
   // AND MARKED : indicates that an object is both explicitly and
   // implicitly live (it should be one or the other, not both)
   void print_reachable(const char* str,
-                       VerifyOption vo, bool all) PRODUCT_RETURN;
+                       VerifyOption vo,
+                       bool all) PRODUCT_RETURN;

   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
@@ -771,12 +776,11 @@ public:
   // this carefully!
   inline void markPrev(oop p);

-  // Clears marks for all objects in the given range, for the prev,
-  // next, or both bitmaps.  NB: the previous bitmap is usually
+  // Clears marks for all objects in the given range, for the prev or
+  // next bitmaps.  NB: the previous bitmap is usually
   // read-only, so use this carefully!
   void clearRangePrevBitmap(MemRegion mr);
   void clearRangeNextBitmap(MemRegion mr);
-  void clearRangeBothBitmaps(MemRegion mr);

   // Notify data structures that a GC has started.
   void note_start_of_gc() {
@@ -798,21 +802,6 @@ public:
                          bool verify_thread_buffers,
                          bool verify_fingers) PRODUCT_RETURN;

-  bool isMarked(oop p) const {
-    assert(p != NULL && p->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)p;
-    assert(addr >= _nextMarkBitMap->startWord() ||
-           addr < _nextMarkBitMap->endWord(), "in a region");
-
-    return _nextMarkBitMap->isMarked(addr);
-  }
-
-  inline bool not_yet_marked(oop p) const;
-
-  // XXX Debug code
-  bool containing_card_is_marked(void* p);
-  bool containing_cards_are_marked(void* start, void* last);
-
   bool isPrevMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
@@ -898,7 +887,8 @@ public:
   // marked_bytes array slot for the given HeapRegion.
   // Sets the bits in the given card bitmap that are associated with the
   // cards that are spanned by the memory region.
-  inline void count_region(MemRegion mr, HeapRegion* hr,
+  inline void count_region(MemRegion mr,
+                           HeapRegion* hr,
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);

@@ -906,56 +896,27 @@ public:
   // data structures for the given worker id.
   inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

-  // Counts the given memory region in the task/worker counting
-  // data structures for the given worker id.
-  inline void count_region(MemRegion mr, uint worker_id);
-
   // Counts the given object in the given task/worker counting
   // data structures.
-  inline void count_object(oop obj, HeapRegion* hr,
+  inline void count_object(oop obj,
+                           HeapRegion* hr,
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);

-  // Counts the given object in the task/worker counting data
-  // structures for the given worker id.
-  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
-
   // Attempts to mark the given object and, if successful, counts
   // the object in the given task/worker counting structures.
-  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
+  inline bool par_mark_and_count(oop obj,
+                                 HeapRegion* hr,
                                  size_t* marked_bytes_array,
                                  BitMap* task_card_bm);

   // Attempts to mark the given object and, if successful, counts
   // the object in the task/worker counting structures for the
   // given worker id.
-  inline bool par_mark_and_count(oop obj, size_t word_size,
-                                 HeapRegion* hr, uint worker_id);
-
-  // Attempts to mark the given object and, if successful, counts
-  // the object in the task/worker counting structures for the
-  // given worker id.
-  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
-
-  // Similar to the above routine but we don't know the heap region that
-  // contains the object to be marked/counted, which this routine looks up.
-  inline bool par_mark_and_count(oop obj, uint worker_id);
-
-  // Similar to the above routine but there are times when we cannot
-  // safely calculate the size of obj due to races and we, therefore,
-  // pass the size in as a parameter. It is the caller's responsibility
-  // to ensure that the size passed in for obj is valid.
-  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
-
-  // Unconditionally mark the given object, and unconditionally count
-  // the object in the counting structures for worker id 0.
-  // Should *not* be called from parallel code.
-  inline bool mark_and_count(oop obj, HeapRegion* hr);
-
-  // Similar to the above routine but we don't know the heap region that
-  // contains the object to be marked/counted, which this routine looks up.
-  // Should *not* be called from parallel code.
-  inline bool mark_and_count(oop obj);
+  inline bool par_mark_and_count(oop obj,
+                                 size_t word_size,
+                                 HeapRegion* hr,
+                                 uint worker_id);

   // Returns true if initialization was successfully completed.
   bool completed_initialization() const {
@@ -1227,9 +1188,12 @@ public:
     _finger = new_finger;
   }

-  CMTask(uint worker_id, ConcurrentMark *cm,
-         size_t* marked_bytes, BitMap* card_bm,
-         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
+  CMTask(uint worker_id,
+         ConcurrentMark *cm,
+         size_t* marked_bytes,
+         BitMap* card_bm,
+         CMTaskQueue* task_queue,
+         CMTaskQueueSet* task_queues);

   // it prints statistics associated with this task
   void print_stats();
@ -86,7 +86,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
  HeapWord* start = mr.start();
  HeapWord* end = mr.end();
  size_t region_size_bytes = mr.byte_size();
  uint index = hr->hrs_index();
  uint index = hr->hrm_index();

  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
@ -125,14 +125,6 @@ inline void ConcurrentMark::count_region(MemRegion mr,
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given memory region, which may be a single object, in the
// task/worker counting data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
  HeapWord* addr = mr.start();
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  count_region(mr, hr, worker_id);
}

// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
@ -142,17 +134,6 @@ inline void ConcurrentMark::count_object(oop obj,
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  HeapWord* addr = (HeapWord*) obj;
  count_object(obj, hr, marked_bytes_array, task_card_bm);
}

// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
@ -184,63 +165,6 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
  return false;
}

// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, worker_id);
    return true;
  }
  return false;
}

// As above - but we don't know the heap region containing the
// object, so this routine looks it up.
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return par_mark_and_count(obj, hr, worker_id);
}

// Similar to the above routine but we already know the size, in words, of
// the object that we wish to mark/count.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    MemRegion mr(addr, word_size);
    count_region(mr, worker_id);
    return true;
  }
  return false;
}

// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
  HeapWord* addr = (HeapWord*)obj;
  _nextMarkBitMap->mark(addr);
  // Update the task specific count data for the object.
  count_object(obj, hr, 0 /* worker_id */);
  return true;
}

// As above - but we don't have the heap region containing the
// object, so this routine looks it up.
inline bool ConcurrentMark::mark_and_count(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return mark_and_count(obj, hr);
}

inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

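heap_region_containing_raw, used throughout the lookups above, reduces to indexing a flat per-region table by (addr - heap base) >> log2(region bytes). A standalone sketch of that mapping (the types and field names are illustrative, not the VM's):

#include <cstddef>
#include <cstdint>

struct Region;   // stand-in for HeapRegion

// G1 keeps one table slot per fixed-size region, so address-to-region
// is a subtraction, a shift and an index: no search is involved.
struct RegionTable {
  Region**       slots;            // one entry per region
  std::uintptr_t heap_base;        // start of the reserved heap
  unsigned       log_region_bytes; // e.g. 20 for 1M regions

  Region* addr_to_region(const void* addr) const {
    std::size_t idx =
        (reinterpret_cast<std::uintptr_t>(addr) - heap_base) >> log_region_bytes;
    return slots[idx];  // caller must know addr is inside the heap
  }
};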
@ -30,14 +30,7 @@
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
  // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
  // retrieve it here since this would cause firing of several asserts. The code
  // executed after commit of a region already needs to do some re-initialization of
  // the HeapRegion, so we combine that.
}

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
@ -59,10 +52,10 @@ G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpa
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr(" "
                           " rs.base(): " INTPTR_FORMAT
                           " rs.size(): " INTPTR_FORMAT
                           " rs end(): " INTPTR_FORMAT,
                           bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
                           " rs.base(): " PTR_FORMAT
                           " rs.size(): " SIZE_FORMAT
                           " rs end(): " PTR_FORMAT,
                           p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end()));
  }
}

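The format-string change in this hunk follows HotSpot's portable-printing convention: PTR_FORMAT consumes the intptr_t produced by p2i(), and SIZE_FORMAT matches size_t, so the same code prints correctly on both LP64 and LLP64 targets. The plain standard C++ rendering of the same idea, as a sketch:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Portable pointer/size printing: cast to a matching integer type and
// use the <cinttypes> macro instead of guessing at %lx vs. %llx.
static void print_range(const void* base, std::size_t bytes) {
  std::printf("base: 0x%" PRIxPTR " size: %zu\n",
              reinterpret_cast<std::uintptr_t>(base), bytes);
}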
@ -72,26 +65,16 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}

void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
  set_offset_array(index_for(left), index_for(right - 1), offset);
}

//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray
//////////////////////////////////////////////////////////////////////

G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
                                       MemRegion mr, bool init_to_zero) :
                                       MemRegion mr) :
  G1BlockOffsetTable(mr.start(), mr.end()),
  _unallocated_block(_bottom),
  _array(array), _gsp(NULL),
  _init_to_zero(init_to_zero) {
  _array(array), _gsp(NULL) {
  assert(_bottom <= _end, "arguments out of order");
  if (!_init_to_zero) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}

void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
@ -181,93 +164,6 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// right-open interval: [blk_start, blk_end)
void
G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  mark_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.
void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
                                     size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  mark_block(blk + left_blk_size, blk + blk_size);
}


// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_verify - BOT verification.
void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                           HeapWord* blk_end,
                                           Action action) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t end_index = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
@ -306,25 +202,6 @@ void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) con
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_mark);
}

HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
@ -381,7 +258,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
  assert(next_boundary <= _array->_end,
         err_msg("next_boundary is beyond the end of the covered region "
                 " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
                 next_boundary, _array->_end));
                 p2i(next_boundary), p2i(_array->_end)));
  if (addr >= gsp()->top()) return gsp()->top();
  while (next_boundary < addr) {
    while (n <= next_boundary) {
@ -397,57 +274,13 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
  return forward_to_block_containing_addr_const(q, n, addr);
}

HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index--);
    q -= offset;
  } while (offset == N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}
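block_start_careful above is the BOT lookup in its simplest form: each card's entry records how many words before the card boundary the covering block starts, and a saturated entry of N_words means the start lies at least a full card further left. The same walk in isolation, assuming for the sketch that every entry is either a direct word offset or that sentinel:

#include <cstddef>

typedef unsigned long Word;      // stand-in for HeapWord

const unsigned kCardWords = 64;  // N_words analogue: words per card

// Walk left card by card: a full entry of kCardWords means the block
// started at least one whole card earlier, so keep stepping back.
Word* block_start(const unsigned char* offsets, Word* card_boundary,
                  std::size_t card_index) {
  Word* q = card_boundary;
  unsigned offset;
  do {
    offset = offsets[card_index--];
    q -= offset;                   // walk back by the recorded offset
  } while (offset == kCardWords);  // saturated entry: not there yet
  return q;                        // first word of the covering block
}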

// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
void G1BlockOffsetArray::resize(size_t new_word_size) {
  HeapWord* new_end = _bottom + new_word_size;
  if (_end < new_end && !init_to_zero()) {
    // verify that the old and new boundaries are also card boundaries
    assert(_array->is_card_boundary(_end),
           "_end not a card boundary");
    assert(_array->is_card_boundary(new_end),
           "new _end would not be a card boundary");
    // set all the newly added cards
    _array->set_offset_array(_end, new_end, N_words);
  }
  _end = new_end;  // update _end
}

void G1BlockOffsetArray::set_region(MemRegion mr) {
  _bottom = mr.start();
  _end = mr.end();
}

//
// threshold_
// |   _index_
@ -522,7 +355,7 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
                 "blk_start: " PTR_FORMAT ", "
                 "boundary: " PTR_FORMAT,
                 (uint)_array->offset_array(orig_index),
                 blk_start, boundary));
                 p2i(blk_start), p2i(boundary)));
  for (size_t j = orig_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <=
@ -556,9 +389,9 @@ G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
                  "card addr: "PTR_FORMAT" BOT entry: %u "
                  "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
                  "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
                  block_start, card, card_addr,
                  p2i(block_start), card, p2i(card_addr),
                  _array->offset_array(card),
                  obj_start, word_size, first_card, last_card);
                  p2i(obj_start), word_size, first_card, last_card);
      return false;
    }
  }
@ -572,10 +405,10 @@ G1BlockOffsetArray::print_on(outputStream* out) {
  size_t to_index = _array->index_for(_end);
  out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
                "cards ["SIZE_FORMAT","SIZE_FORMAT")",
                _bottom, _end, from_index, to_index);
                p2i(_bottom), p2i(_end), from_index, to_index);
  for (size_t i = from_index; i < to_index; ++i) {
    out->print_cr("  entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
                  i, _array->address_for_index(i),
                  i, p2i(_array->address_for_index(i)),
                  (uint) _array->offset_array(i));
  }
}
@ -606,7 +439,7 @@ block_start_unsafe_const(const void* addr) const {
G1BlockOffsetArrayContigSpace::
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                              MemRegion mr) :
  G1BlockOffsetArray(array, mr, true)
  G1BlockOffsetArray(array, mr)
{
  _next_offset_threshold = NULL;
  _next_offset_index = 0;
@ -641,15 +474,6 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
  return _next_offset_threshold;
}

void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  assert(_array->address_for_index(bottom_index) == _bottom,
         "Precondition of call");
  _array->set_offset_array(bottom_index, 0);
}

void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
  assert(new_top <= _end, "_end should have already been updated");
@ -663,7 +487,7 @@ G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
void
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
  G1BlockOffsetArray::print_on(out);
  out->print_cr("  next offset threshold: "PTR_FORMAT, _next_offset_threshold);
  out->print_cr("  next offset threshold: "PTR_FORMAT, p2i(_next_offset_threshold));
  out->print_cr("  next offset index: "SIZE_FORMAT, _next_offset_index);
}
#endif // !PRODUCT

@ -109,7 +109,12 @@ public:

class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
  virtual void on_commit(uint start_idx, size_t num_regions);
  virtual void on_commit(uint start_idx, size_t num_regions) {
    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
    // retrieve it here since this would cause firing of several asserts. The code
    // executed after commit of a region already needs to do some re-initialization of
    // the HeapRegion, so we combine that.
  }
};

// This implementation of "G1BlockOffsetTable" divides the covered region
@ -153,8 +158,6 @@ private:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);

  void set_offset_array_raw(size_t index, u_char offset) {
    _offset_array[index] = offset;
  }
@ -165,8 +168,6 @@ private:

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;

  bool is_card_boundary(HeapWord* p) const;

public:
@ -193,8 +194,6 @@ public:
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);

  void set_bottom(HeapWord* new_bottom);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;
@ -220,14 +219,6 @@ private:
    LogN = G1BlockOffsetSharedArray::LogN
  };

  // The following enums are used by do_block_helper
  enum Action {
    Action_single,  // BOT records a single block (see single_block())
    Action_mark,    // BOT marks the start of a block (see mark_block())
    Action_check    // Check that BOT records block correctly
                    // (see verify_single_block()).
  };

  // This is the array, which can be shared by several BlockOffsetArray's
  // servicing different
  G1BlockOffsetSharedArray* _array;
@ -235,10 +226,6 @@ private:
  // The space that owns this subregion.
  G1OffsetTableContigSpace* _gsp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
@ -253,9 +240,6 @@ private:
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

protected:

  G1OffsetTableContigSpace* gsp() const { return _gsp; }
@ -303,11 +287,9 @@ protected:

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
                     bool init_to_zero);
  // region is passed as a parameter. The elements of the array are
  // initialized to zero.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
@ -315,114 +297,19 @@ public:
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(G1OffsetTableContigSpace* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  virtual void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // general, non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  inline void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }

  inline void freed(HeapWord* blk_start, HeapWord* blk_end);

  inline void freed(HeapWord* blk, size_t size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Verification & debugging - ensure that the offset table reflects the fact
  // that the block [blk_start, blk_end) or [blk, blk + size) is a
  // single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (VerifyBlockOffsetArray) {
      do_block_internal(blk_start, blk_end, Action_check);
    }
  }

  inline void verify_single_block(HeapWord* blk, size_t size) {
    verify_single_block(blk, blk + size);
  }

  // Used by region verification. Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  // Verify that the given block is before _unallocated_block
  inline void verify_not_unallocated(HeapWord* blk_start,
                                     HeapWord* blk_end) const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      assert(blk_start < blk_end, "Block inconsistency?");
      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
    }
  }

  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
    verify_not_unallocated(blk, blk + size);
  }

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
@ -445,14 +332,12 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
                            blk_start, blk_end);
  }

  // Variant of zero_bottom_entry that does not check for availability of the
  // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
  // memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability of the
  // memory first.
  HeapWord* initialize_threshold_raw();
  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);


@ -91,13 +91,6 @@ void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_cha
  }
}

void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
  check_index(index, "index out of range");
  assert(high >= low, "addresses out of order");
  check_offset(pointer_delta(high, low), "offset too large");
  assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
}

// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
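index_for_raw above is the unchecked twin of index_for: both shift the byte offset from the reserved base right by LogN, the raw variant merely skips validation. The pairing in miniature (the 512-byte card size is an assumption of this sketch, not quoted from the sources):

#include <cassert>
#include <cstddef>

const unsigned kLogCardBytes = 9;  // LogN analogue: 512-byte cards

// Raw variant: pure arithmetic, for callers that already know p is valid.
inline std::size_t index_for_raw(const char* base, const void* p) {
  return (std::size_t)((const char*)p - base) >> kLogCardBytes;
}

// Checked variant: the same arithmetic plus a debug-time range check.
inline std::size_t index_for(const char* base, std::size_t covered_bytes,
                             const void* p) {
  std::size_t idx = index_for_raw(base, p);
  assert(idx < (covered_bytes >> kLogCardBytes) && "p outside covered region");
  return idx;
}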
@ -193,28 +186,4 @@ G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
  return q;
}

//////////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace inlines
//////////////////////////////////////////////////////////////////////////
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // adjust _unallocated_block upward or downward
  // as appropriate
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added or
      // we are resetting following a compaction
      _unallocated_block = blk_start;
    }
  }
}

inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
  freed(blk, blk + size);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

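The freed()/allocated() pair above maintains _unallocated_block as a watermark: allocation can only push it right, and freeing a block that touches it pulls it back to the block's start. The discipline in isolation (single-threaded sketch with illustrative names):

#include <algorithm>

typedef unsigned long Word;

struct Watermark {
  Word* unallocated;  // everything at or past this point is one free block

  // A block [start, end) was allocated: the watermark only moves right.
  void allocated(Word* start, Word* end) {
    (void)start;
    unallocated = std::max(unallocated, end);
  }

  // A block [start, end) was freed: if it touches the watermark, the
  // free tail now begins at the block's start.
  void freed(Word* start, Word* end) {
    if (end >= unallocated && start <= unallocated) {
      unallocated = start;
    }
  }
};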
@ -532,9 +532,9 @@ G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
      // again to allocate from it.
      append_secondary_free_list();

      assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _hrs.allocate_free_region(is_old);
      HeapRegion* res = _hrm.allocate_free_region(is_old);
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "allocated "HR_FORMAT" from secondary_free_list",
@ -575,7 +575,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
    }
  }

  res = _hrs.allocate_free_region(is_old);
  res = _hrm.allocate_free_region(is_old);

  if (res == NULL) {
    if (G1ConcRegionFreeingVerbose) {
@ -601,7 +601,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for NULL.
      res = _hrs.allocate_free_region(is_old);
      res = _hrm.allocate_free_region(is_old);
    } else {
      _expand_heap_after_alloc_failure = false;
    }
@ -613,7 +613,7 @@ HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first != G1_NO_HRS_INDEX, "pre-condition");
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

@ -751,7 +751,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

  verify_region_sets_optional();

  uint first = G1_NO_HRS_INDEX;
  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);

  if (obj_regions == 1) {
@ -760,7 +760,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
    // later.
    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrs_index();
      first = hr->hrm_index();
    }
  } else {
    // We can't allocate humongous regions spanning more than one region while
@ -776,18 +776,18 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

    // Policy: Try only empty regions (i.e. already committed first). Maybe we
    // are lucky enough to find some.
    first = _hrs.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRS_INDEX) {
      _hrs.allocate_free_regions_starting_at(first, obj_regions);
    first = _hrm.find_contiguous_only_empty(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    }
  }

  if (first == G1_NO_HRS_INDEX) {
  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, try expansion.
    first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRS_INDEX) {
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
      ergo_verbose1(ErgoHeapSizing,
@ -796,7 +796,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
                    ergo_format_byte("allocation request"),
                    word_size * HeapWordSize);

      _hrs.expand_at(first, obj_regions);
      _hrm.expand_at(first, obj_regions);
      g1_policy()->record_new_heap_size(num_regions());

#ifdef ASSERT
@ -806,14 +806,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
        assert(is_on_master_free_list(hr), "sanity");
      }
#endif
      _hrs.allocate_free_regions_starting_at(first, obj_regions);
      _hrm.allocate_free_regions_starting_at(first, obj_regions);
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRS_INDEX) {
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
    assert(result != NULL, "it should always return a valid result");

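The obj_regions computation above is plain ceiling division: round the humongous request up to whole regions. A small self-check of that arithmetic (the grain value is illustrative, not the VM's configured size):

#include <cassert>
#include <cstddef>

// Ceiling division: how many fixed-size regions a humongous
// allocation of word_size words spans.
inline unsigned regions_for(std::size_t word_size, std::size_t grain_words) {
  return (unsigned)((word_size + grain_words - 1) / grain_words);
}

int main() {
  const std::size_t grain = 1u << 17;          // illustrative words per region
  assert(regions_for(1, grain) == 1);          // tiny request: one region
  assert(regions_for(grain, grain) == 1);      // exact fit: one region
  assert(regions_for(grain + 1, grain) == 2);  // one word over: two regions
  return 0;
}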
@ -1248,7 +1248,7 @@ public:
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrs_post_compaction() {
void G1CollectedHeap::print_hrm_post_compaction() {
  PostCompactionPrinterClosure cl(hr_printer());
  heap_region_iterate(&cl);
}
@ -1417,7 +1417,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
      // that all the COMMIT / UNCOMMIT events are generated before
      // the end GC event.

      print_hrs_post_compaction();
      print_hrm_post_compaction();
      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
    }

@ -1490,7 +1490,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
    // Update the number of full collections that have been completed.
    increment_old_marking_cycles_completed(false /* concurrent */);

    _hrs.verify_optional();
    _hrm.verify_optional();
    verify_region_sets_optional();

    verify_after_gc();
@ -1734,7 +1734,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
                ergo_format_byte("allocation request"),
                word_size * HeapWordSize);
  if (expand(expand_bytes)) {
    _hrs.verify_optional();
    _hrm.verify_optional();
    verify_region_sets_optional();
    return attempt_allocation_at_safepoint(word_size,
                                           false /* expect_null_mutator_alloc_region */);
@ -1762,7 +1762,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  assert(regions_to_expand > 0, "Must expand by at least one region");

  uint expanded_by = _hrs.expand_by(regions_to_expand);
  uint expanded_by = _hrm.expand_by(regions_to_expand);

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
@ -1775,7 +1775,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _hrs.available() >= regions_to_expand) {
        _hrm.available() >= regions_to_expand) {
      // We had head room...
      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
    }
@ -1790,7 +1790,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
                                         HeapRegion::GrainBytes);
  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);

  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

  ergo_verbose3(ErgoHeapSizing,
@ -1823,7 +1823,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
  shrink_helper(shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);

  _hrs.verify_optional();
  _hrm.verify_optional();
  verify_region_sets_optional();
}

@ -1867,6 +1867,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _heap_summary_sent(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
  _worker_cset_start_region(NULL),
@ -2032,7 +2033,7 @@ jint G1CollectedHeap::initialize() {
                                         CMBitMap::mark_distance(),
                                         mtGC);

  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
  g1_barrier_set()->initialize(cardtable_storage);
  // Do later initialization work for concurrent refinement.
  _cg1r->init(card_counts_storage);
@ -2053,8 +2054,8 @@ jint G1CollectedHeap::initialize() {

  _g1h = this;

  _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
  _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
  _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
  _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
@ -2115,7 +2116,7 @@ jint G1CollectedHeap::initialize() {

  // Here we allocate the dummy HeapRegion that is required by the
  // G1AllocRegion class.
  HeapRegion* dummy_region = _hrs.get_dummy_region();
  HeapRegion* dummy_region = _hrm.get_dummy_region();

  // We'll re-use the same region whether the alloc region will
  // require BOT updates or not and, if it doesn't, then a non-young
@ -2232,14 +2233,14 @@ void G1CollectedHeap::ref_processing_init() {
}

size_t G1CollectedHeap::capacity() const {
  return _hrs.length() * HeapRegion::GrainBytes;
  return _hrm.length() * HeapRegion::GrainBytes;
}

void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  assert(!hr->continuesHumongous(), "pre-condition");
  hr->reset_gc_time_stamp();
  if (hr->startsHumongous()) {
    uint first_index = hr->hrs_index() + 1;
    uint first_index = hr->hrm_index() + 1;
    uint last_index = hr->last_hc_index();
    for (uint i = first_index; i < last_index; i += 1) {
      HeapRegion* chr = region_at(i);
@ -2445,13 +2446,24 @@ void G1CollectedHeap::register_concurrent_cycle_end() {
    _gc_timer_cm->register_gc_end();
    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());

    // Clear state variables to prepare for the next concurrent cycle.
    _concurrent_cycle_started = false;
    _heap_summary_sent = false;
  }
}

void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  if (_concurrent_cycle_started) {
    trace_heap_after_gc(_gc_tracer_cm);
    // This function can be called when:
    //  the cleanup pause is run
    //  the concurrent cycle is aborted before the cleanup pause.
    //  the concurrent cycle is aborted after the cleanup pause,
    //   but before the concurrent cycle end has been registered.
    // Make sure that we only send the heap information once.
    if (!_heap_summary_sent) {
      trace_heap_after_gc(_gc_tracer_cm);
      _heap_summary_sent = true;
    }
  }
}
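The _heap_summary_sent flag added above is a plain latch: several paths (the cleanup pause, a cycle aborted before or after cleanup) can reach the reporting point, and only the first may emit the summary. The pattern in isolation (single-threaded sketch; in the VM these calls run under the safepoint protocol):

#include <cstdio>

struct CycleTracer {
  bool cycle_started = false;
  bool summary_sent = false;

  void emit_summary() { std::puts("heap summary"); }

  // Reachable from several paths; the latch guarantees the summary
  // is emitted at most once per cycle.
  void trace_after_cycle() {
    if (cycle_started && !summary_sent) {
      emit_summary();
      summary_sent = true;
    }
  }

  // Re-arm the latch when the cycle formally ends.
  void cycle_end() {
    cycle_started = false;
    summary_sent = false;
  }
};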

@ -2537,7 +2549,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}

bool G1CollectedHeap::is_in(const void* p) const {
  if (_hrs.reserved().contains(p)) {
  if (_hrm.reserved().contains(p)) {
    // Given that we know that p is in the reserved space,
    // heap_region_containing_raw() should successfully
    // return the containing region.
@ -2551,7 +2563,7 @@ bool G1CollectedHeap::is_in(const void* p) const {
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
  bool contains = reserved_region().contains(p);
  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
  if (contains && available) {
    return true;
  } else {
@ -2618,7 +2630,7 @@ void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrs.iterate(cl);
  _hrm.iterate(cl);
}

void
@ -2626,7 +2638,7 @@ G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                 uint worker_id,
                                                 uint num_workers,
                                                 jint claim_value) const {
  _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
}

class ResetClaimValuesClosure: public HeapRegionClosure {
@ -2846,9 +2858,9 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
}

HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
  HeapRegion* result = _hrs.next_region_in_heap(from);
  HeapRegion* result = _hrm.next_region_in_heap(from);
  while (result != NULL && result->isHumongous()) {
    result = _hrs.next_region_in_heap(result);
    result = _hrm.next_region_in_heap(result);
  }
  return result;
}
@ -2908,7 +2920,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
}

size_t G1CollectedHeap::max_capacity() const {
  return _hrs.reserved().byte_size();
  return _hrm.reserved().byte_size();
}

jlong G1CollectedHeap::millis_since_last_gc() {
@ -3437,9 +3449,9 @@ void G1CollectedHeap::print_on(outputStream* st) const {
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used_unlocked()/K);
  st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
            _hrs.reserved().start(),
            _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
            _hrs.reserved().end());
            _hrm.reserved().start(),
            _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
            _hrm.reserved().end());
  st->cr();
  st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  uint young_regions = _young_list->length();
@ -3682,7 +3694,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    uint region_idx = r->hrs_index();
    uint region_idx = r->hrm_index();
    bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
    // Is_candidate already filters out humongous regions with some remembered set.
    // This will not lead to a humongous object that we mistakenly keep alive because
@ -4205,7 +4217,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  // output from the concurrent mark thread interfering with this
  // logging output either.

  _hrs.verify_optional();
  _hrm.verify_optional();
  verify_region_sets_optional();

  TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@ -6024,7 +6036,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
                                  bool locked) {
  assert(!hr->isHumongous(), "this is only for non-humongous regions");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
  assert(free_list != NULL, "pre-condition");

  if (G1VerifyBitmaps) {
@ -6055,7 +6067,7 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  hr->set_notHumongous();
  free_region(hr, free_list, par);

  uint i = hr->hrs_index() + 1;
  uint i = hr->hrm_index() + 1;
  while (i < last_index) {
    HeapRegion* curr_hr = region_at(i);
    assert(curr_hr->continuesHumongous(), "invariant");
@ -6079,7 +6091,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
  assert(list != NULL, "list can't be null");
  if (!list->is_empty()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    _hrs.insert_list_into_free_list(list);
    _hrm.insert_list_into_free_list(list);
  }
}

@ -6448,7 +6460,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
    // While this cleanup is not strictly necessary to be done (or done instantly),
    // given that their occurrence is very low, this saves us this additional
    // complexity.
    uint region_idx = r->hrs_index();
    uint region_idx = r->hrm_index();
    if (g1h->humongous_is_live(region_idx) ||
        g1h->humongous_region_is_always_live(region_idx)) {

@ -6687,22 +6699,22 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
    // this is that during a full GC string deduplication needs to know if
    // a collected region was young or old when the full GC was initiated.
  }
  _hrs.remove_all_free_regions();
  _hrm.remove_all_free_regions();
}

class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;
  HeapRegionSet* _old_set;
  HeapRegionSeq* _hrs;
  HeapRegionManager* _hrm;
  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set, HeapRegionSeq* hrs) :
                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrs(hrs), _total_used(0) {
    assert(_hrs->num_free_regions() == 0, "pre-condition");
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
@ -6715,7 +6727,7 @@ public:

    if (r->is_empty()) {
      // Add free regions to the free list
      _hrs->insert_into_free_list(r);
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(!r->is_young(), "we should not come across young regions");

@ -6743,7 +6755,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
    _young_list->empty_list();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
@ -6933,7 +6945,7 @@ class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;
  HeapRegionSet* _humongous_set;
  HeapRegionSeq* _hrs;
  HeapRegionManager* _hrm;

public:
  HeapRegionSetCount _old_count;
@ -6942,8 +6954,8 @@ public:

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionSeq* hrs) :
    _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count() { }

  bool doHeapRegion(HeapRegion* hr) {
@ -6954,19 +6966,19 @@ public:
    if (hr->is_young()) {
      // TODO
    } else if (hr->startsHumongous()) {
      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
      _humongous_count.increment(1u, hr->capacity());
    } else if (hr->is_empty()) {
      assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
      _free_count.increment(1u, hr->capacity());
    } else {
      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
      _old_count.increment(1u, hr->capacity());
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
    guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
                                                                                old_set->total_capacity_bytes(), _old_count.capacity()));
@ -6985,7 +6997,7 @@ void G1CollectedHeap::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _hrs.verify();
  _hrm.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
@ -7016,9 +7028,9 @@ void G1CollectedHeap::verify_region_sets() {
  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
  heap_region_iterate(&cl);
  cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
}

// Optimized nmethod scanning

@ -33,7 +33,7 @@
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
@ -291,7 +291,7 @@ private:
  G1RegionMappingChangedListener _listener;

  // The sequence of all heap regions in the heap.
  HeapRegionSeq _hrs;
  HeapRegionManager _hrm;

  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;
@ -415,6 +415,7 @@ private:
  volatile unsigned int _old_marking_cycles_completed;

  bool _concurrent_cycle_started;
  bool _heap_summary_sent;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
@ -429,7 +430,7 @@ private:

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrs_post_compaction();
  void print_hrm_post_compaction();

  double verify(bool guard, const char* msg);
  void verify_before_gc();
@ -715,7 +716,7 @@ public:
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_cset(r->hrs_index());
    _in_cset_fast_test.set_in_cset(r->hrm_index());
  }

  // This is a fast test on whether a reference points into the
@ -1171,17 +1172,17 @@ public:
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrs.available() == 0;
    return _hrm.available() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrs.length(); }
  uint num_regions() const { return _hrm.length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrs.max_length(); }
  uint max_regions() const { return _hrm.max_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrs.num_free_regions(); }
  uint num_free_regions() const { return _hrm.num_free_regions(); }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return num_regions() - num_free_regions(); }
@ -1233,7 +1234,7 @@ public:

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrs.is_free(hr);
    return _hrm.is_free(hr);
  }
#endif // ASSERT

@ -1245,7 +1246,7 @@ public:
  }

  void append_secondary_free_list() {
    _hrs.insert_list_into_free_list(&_secondary_free_list);
    _hrm.insert_list_into_free_list(&_secondary_free_list);
  }

  void append_secondary_free_list_if_not_empty_with_lock() {
@ -1356,13 +1357,13 @@ public:
  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
    return _hrs.reserved().contains(p);
    return _hrm.reserved().contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap
  MemRegion g1_reserved() const {
    return _hrs.reserved();
    return _hrm.reserved();
  }

  virtual bool is_in_closed_subset(const void* p) const;

@ -30,15 +30,15 @@
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
@ -48,7 +48,7 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrs.reserved().start() + index * HeapRegion::GrainWords;
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}
|
||||
@ -57,7 +57,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
|
||||
assert(is_in_g1_reserved((const void*) addr),
|
||||
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
|
||||
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
|
||||
return _hrs.addr_to_region((HeapWord*) addr);
|
||||
return _hrm.addr_to_region((HeapWord*) addr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
@ -87,7 +87,7 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
|
||||
}
|
||||
|
||||
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
|
||||
HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
|
||||
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
|
||||
return r != NULL && r->in_collection_set();
|
||||
}
|
||||
|
||||
|
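bottom_addr_for_region above is the inverse of addr_to_region: with a fixed region size of HeapRegion::GrainWords, region index i maps to reserved().start() + i * GrainWords, and an address maps back by integer division. A small self-contained illustration of that arithmetic (grain_words and the word-offset typedef are simplifications; real HotSpot goes through the biased G1HeapRegionTable):

    #include <cassert>
    #include <cstddef>

    typedef size_t HeapWordIdx; // word offset from the reserved base

    const size_t grain_words = 1024 * 128; // pretend 1 MB regions of 8-byte words

    // address (as word offset) -> region index: plain integer division
    size_t addr_to_region(HeapWordIdx addr) { return addr / grain_words; }

    // region index -> first word of that region: the inverse mapping
    HeapWordIdx bottom_addr_for_region(size_t index) { return index * grain_words; }

    int main() {
      HeapWordIdx addr = 3 * grain_words + 17;    // 17 words into region 3
      assert(addr_to_region(addr) == 3);
      assert(bottom_addr_for_region(3) <= addr);
      assert(addr < bottom_addr_for_region(4));   // round trip brackets the address
      return 0;
    }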
@ -32,7 +32,7 @@
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
@ -322,34 +322,11 @@ bool HeapRegion::claimHeapRegion(jint claimValue) {
return false;
}

HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
HeapWord* low = addr;
HeapWord* high = end();
while (low < high) {
size_t diff = pointer_delta(high, low);
// Must add one below to bias toward the high amount. Otherwise, if
// "high" were at the desired value, and "low" were one less, we
// would not converge on "high". This is not symmetric, because
// we set "high" to a block start, which might be the right one,
// which we don't do for "low".
HeapWord* middle = low + (diff+1)/2;
if (middle == high) return high;
HeapWord* mid_bs = block_start_careful(middle);
if (mid_bs < addr) {
low = middle;
} else {
high = mid_bs;
}
}
assert(low == high && low >= addr, "Didn't work.");
return low;
}

HeapRegion::HeapRegion(uint hrs_index,
HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
G1OffsetTableContigSpace(sharedOffsetArray, mr),
_hrs_index(hrs_index),
_hrm_index(hrm_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
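The next_block_start_careful binary search deleted above hinges on the `(diff+1)/2` rounding its comment describes: because the loop advances with low = middle (not middle + 1), a midpoint that rounds down would stall once high == low + 1. A stripped-down model of the same convergence argument, with an invented predicate and data in place of block_start_careful:

    #include <cassert>

    // Find the smallest index in [lo, hi] whose value is >= target,
    // advancing lo to mid without a +1 step, as the removed code did.
    int converge(const int* vals, int lo, int hi, int target) {
      while (lo < hi) {
        int diff = hi - lo;
        int mid = lo + (diff + 1) / 2;  // round UP: guarantees mid > lo
        if (vals[mid - 1] < target) {
          lo = mid;                     // safe: mid > lo, so we always make progress
        } else {
          hi = mid - 1;
        }
      }
      return lo;
    }

    int main() {
      int vals[] = {1, 3, 5, 7, 9};
      assert(converge(vals, 0, 4, 6) == 3);  // vals[3] == 7 is the first >= 6
      return 0;
    }

With round-down (mid = lo + diff / 2) the final step lo = 3, hi = 4 would compute mid = 3 and loop forever; rounding up is what forces mid past lo on every iteration.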
@ -54,15 +54,15 @@ class nmethod;

#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
(_hr_)->hrs_index(), \
(_hr_)->hrm_index(), \
(_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
(_hr_)->startsHumongous() ? "HS" : \
(_hr_)->continuesHumongous() ? "HC" : \
!(_hr_)->is_empty() ? "O" : "F", \
p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())

// sentinel value for hrs_index
#define G1_NO_HRS_INDEX ((uint) -1)
// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@ -206,10 +206,6 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
_offsets.reset_bot();
}

void update_bot_for_object(HeapWord* start, size_t word_size) {
_offsets.alloc_block(start, word_size);
}

void print_bot_on(outputStream* out) {
_offsets.print_on(out);
}
@ -234,7 +230,7 @@ class HeapRegion: public G1OffsetTableContigSpace {

protected:
// The index of this region in the heap region sequence.
uint _hrs_index;
uint _hrm_index;

HumongousType _humongous_type;
// For a humongous region, region in which it starts.
@ -330,7 +326,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
size_t _predicted_bytes_to_copy;

public:
HeapRegion(uint hrs_index,
HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);

@ -385,9 +381,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
inline HeapWord* allocate_no_bot_updates(size_t word_size);

// If this region is a member of a HeapRegionSeq, the index in that
// If this region is a member of a HeapRegionManager, the index in that
// sequence, otherwise -1.
uint hrs_index() const { return _hrs_index; }
uint hrm_index() const { return _hrm_index; }

// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
@ -458,7 +454,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// with this HS region.
uint last_hc_index() const {
assert(startsHumongous(), "don't call this otherwise");
return hrs_index() + region_num();
return hrm_index() + region_num();
}

// Same as Space::is_in_reserved, but will use the original size of the region.
@ -570,7 +566,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

HeapWord* orig_end() { return _orig_end; }
HeapWord* orig_end() const { return _orig_end; }

// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space, bool locked = false);
@ -737,18 +733,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool filter_young,
jbyte* card_ptr);

// A version of block start that is guaranteed to find *some* block
// boundary at or before "p", but does not object iteration, and may
// therefore be used safely when the heap is unparseable.
HeapWord* block_start_careful(const void* p) const {
return _offsets.block_start_careful(p);
}

// Requires that "addr" is within the region. Returns the start of the
// first ("careful") block that starts at or after "addr", or else the
// "end" of the region if there is no such block.
HeapWord* next_block_start_careful(HeapWord* addr);

size_t recorded_rs_length() const { return _recorded_rs_length; }
double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
@ -813,7 +797,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
friend class HeapRegionSeq;
friend class HeapRegionManager;
friend class G1CollectedHeap;

bool _complete;
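The HR_FORMAT / HR_FORMAT_PARAMS pair touched above is a printf-style idiom worth noting: one macro supplies the format string, its sibling expands to exactly the argument list that format expects, so call sites cannot drift out of sync when a field (here, the renamed index accessor) changes. A toy version of the idiom; the Region struct is invented for the example:

    #include <cstdio>

    struct Region {
      unsigned idx;
      const char* kind;
      void* bottom;
      void* top;
    };

    // Format string and its matching argument pack travel as a pair.
    #define R_FORMAT "%u:(%s)[%p,%p]"
    #define R_FORMAT_PARAMS(r_) (r_)->idx, (r_)->kind, (r_)->bottom, (r_)->top

    int main() {
      int x;
      Region r = {7, "F", &x, &x};
      // One macro per region in the call; impossible to forget an argument.
      printf("region " R_FORMAT "\n", R_FORMAT_PARAMS(&r));
      return 0;
    }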
@ -24,13 +24,13 @@

#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "memory/allocation.hpp"

void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
@ -55,24 +55,24 @@ void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
_available_map.clear();
}

bool HeapRegionSeq::is_available(uint region) const {
bool HeapRegionManager::is_available(uint region) const {
return _available_map.at(region);
}

#ifdef ASSERT
bool HeapRegionSeq::is_free(HeapRegion* hr) const {
bool HeapRegionManager::is_free(HeapRegion* hr) const {
return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
}

void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
guarantee(num_regions > 0, "Must commit more than zero regions");
guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

@ -90,7 +90,7 @@ void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
_card_counts_mapper->commit_regions(index, num_regions);
}

void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
guarantee(_num_committed >= num_regions, "pre-condition");

@ -117,7 +117,7 @@ void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
_card_counts_mapper->uncommit_regions(start, num_regions);
}

void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
guarantee(num_regions > 0, "No point in calling this for zero regions");
commit_regions(start, num_regions);
for (uint i = start; i < start + num_regions; i++) {
@ -144,11 +144,11 @@ void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
}
}

uint HeapRegionSeq::expand_by(uint num_regions) {
uint HeapRegionManager::expand_by(uint num_regions) {
return expand_at(0, num_regions);
}

uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
uint HeapRegionManager::expand_at(uint start, uint num_regions) {
if (num_regions == 0) {
return 0;
}
@ -171,7 +171,7 @@ uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
return expanded;
}

uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
uint found = 0;
size_t length_found = 0;
uint cur = 0;
@ -199,14 +199,14 @@ uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
}
return found;
} else {
return G1_NO_HRS_INDEX;
return G1_NO_HRM_INDEX;
}
}

HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
guarantee(r != NULL, "Start region must be a valid region");
guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
HeapRegion* hr = _regions.get_by_index(i);
if (is_available(i)) {
return hr;
@ -215,7 +215,7 @@ HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
return NULL;
}

void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
uint len = max_length();

for (uint i = 0; i < len; i++) {
@ -231,7 +231,7 @@ void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
}
}

uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
guarantee(res_idx != NULL, "checking");
guarantee(start_idx <= (max_length() + 1), "checking");

@ -259,11 +259,11 @@ uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) con
return num_regions;
}

uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
return num_regions * worker_i / num_workers;
}

void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);

// Every worker will actually look at all regions, skipping over regions that
@ -334,7 +334,7 @@ void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num
}
}

uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
assert(length() > 0, "the region sequence should not be empty");
assert(length() <= _allocated_heapregions_length, "invariant");
assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
@ -351,10 +351,6 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {

while ((removed < num_regions_to_remove) &&
(num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
// Only allow uncommit from the end of the heap.
if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
return 0;
}
uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
@ -368,7 +364,7 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
return removed;
}

uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx < _allocated_heapregions_length, "checking");
guarantee(res_idx != NULL, "checking");

@ -397,7 +393,7 @@ uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) c
return num_regions_found;
}

void HeapRegionSeq::verify() {
void HeapRegionManager::verify() {
guarantee(length() <= _allocated_heapregions_length,
err_msg("invariant: _length: %u _allocated_length: %u",
length(), _allocated_heapregions_length));
@ -419,8 +415,8 @@ void HeapRegionSeq::verify() {
guarantee(!prev_committed || hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
guarantee(hr->hrm_index() == i,
err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
@ -443,7 +439,7 @@ void HeapRegionSeq::verify() {
}

#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
void HeapRegionManager::verify_optional() {
verify();
}
#endif // PRODUCT
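start_region_for_worker above carves the region index space with num_regions * worker_i / num_workers, which spreads any remainder one extra region at a time instead of dumping it all on the last worker. A quick standalone check of that property (the worker and region counts are arbitrary):

    #include <cstdio>

    unsigned start_region_for_worker(unsigned worker_i, unsigned num_workers, unsigned num_regions) {
      return num_regions * worker_i / num_workers;  // multiply first, then divide
    }

    int main() {
      // 10 regions over 3 workers: chunks start at 0, 3, 6 -> sizes 3, 3, 4.
      for (unsigned w = 0; w < 3; w++) {
        unsigned begin = start_region_for_worker(w, 3, 10);
        unsigned end = (w == 2) ? 10 : start_region_for_worker(w + 1, 3, 10);
        printf("worker %u: regions [%u, %u)\n", w, begin, end);
      }
      return 0;
    }

Dividing before multiplying would instead give every worker floor(10/3) = 3 regions and strand the remainder; the multiply-first form keeps each worker's share within one region of the ideal.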
@ -22,8 +22,8 @@
*
*/

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP

#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
@ -64,7 +64,7 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
// * max_length() returns the maximum number of regions the heap can have.
//

class HeapRegionSeq: public CHeapObj<mtGC> {
class HeapRegionManager: public CHeapObj<mtGC> {
friend class VMStructs;

G1HeapRegionTable _regions;
@ -104,7 +104,7 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;

// Find a contiguous set of empty or uncommitted regions of length num and return
// the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
// the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
// If only_empty is true, only empty regions are considered.
// Searches from bottom to top of the heap, doing a first-fit.
uint find_contiguous(size_t num, bool only_empty);
@ -117,7 +117,7 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// sequence could be found, otherwise res_idx contains the start index of this range.
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
// Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrs_index);
HeapRegion* new_heap_region(uint hrm_index);
#ifdef ASSERT
public:
bool is_free(HeapRegion* hr) const;
@ -127,7 +127,7 @@public:

public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0),
_next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
_allocated_heapregions_length(0), _available_map(),
_free_list("Free list", new MasterFreeRegionListMtSafeChecker())
@ -167,7 +167,7 @@ public:

if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
assert(is_available(hr->hrs_index()), "Must be committed");
assert(is_available(hr->hrm_index()), "Must be committed");
}
return hr;
}
@ -211,10 +211,10 @@ public:
uint expand_at(uint start, uint num_regions);

// Find a contiguous set of empty regions of length num. Returns the start index of
// that set, or G1_NO_HRS_INDEX.
// that set, or G1_NO_HRM_INDEX.
uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
// Find a contiguous set of empty or unavailable regions of length num. Returns the
// start index of that set, or G1_NO_HRS_INDEX.
// start index of that set, or G1_NO_HRM_INDEX.
uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }

HeapRegion* next_region_in_heap(const HeapRegion* r) const;
@ -235,5 +235,5 @@ public:
void verify_optional() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
@ -22,14 +22,14 @@
*
*/

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP

#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"

inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(),
err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
assert(addr >= heap_bottom(),
@ -39,20 +39,20 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
return hr;
}

inline HeapRegion* HeapRegionSeq::at(uint index) const {
inline HeapRegion* HeapRegionManager::at(uint index) const {
assert(is_available(index), "pre-condition");
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
assert(hr->hrm_index() == index, "sanity");
return hr;
}

inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
_free_list.add_ordered(hr);
}

inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
inline void HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
_free_list.remove_starting_at(at(first), num_regions);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
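addr_to_region and at() above both bottom out in the G1BiasedMappedArray (_regions); since the region size is a power of two, lookups subtract a base and shift rather than divide. A simplified model of a biased, shift-indexed table; the field names here are illustrative, not the real G1HeapRegionTable layout:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct BiasedTable {
      uintptr_t base;              // lowest mapped address (the bias)
      unsigned shift;              // log2 of the region size in bytes
      std::vector<int> entries;    // one slot per region

      // address -> slot: subtract the base, then shift; no division needed.
      int& slot_for(uintptr_t addr) {
        size_t idx = (addr - base) >> shift;
        assert(idx < entries.size());
        return entries[idx];
      }
    };

    int main() {
      BiasedTable t = { 0x100000, 20, std::vector<int>(8, 0) };  // 8 one-MB regions
      t.slot_for(0x100000 + (3 << 20) + 12345) = 42;             // somewhere in region 3
      assert(t.entries[3] == 42);
      return 0;
    }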
@ -27,7 +27,7 @@
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
@ -420,7 +420,7 @@ void OtherRegionsTable::print_from_card_cache() {
}

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
uint cur_hrs_ind = hr()->hrs_index();
uint cur_hrm_ind = hr()->hrm_index();

if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@ -435,10 +435,10 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
FromCardCache::at((uint)tid, cur_hrs_ind));
FromCardCache::at((uint)tid, cur_hrm_ind));
}

if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("  from-card cache hit.");
}
@ -448,7 +448,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {

// Note that this may be a continued H region.
HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrm_index();

// If the region is already coarsened, return.
if (_coarse_map.at(from_hrs_ind)) {
@ -495,7 +495,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("   [tid %d] sparse table entry "
"overflow(f: %d, t: %u)",
tid, from_hrs_ind, cur_hrs_ind);
tid, from_hrs_ind, cur_hrm_ind);
}
}

@ -607,9 +607,9 @@ PerRegionTable* OtherRegionsTable::delete_region_table() {
guarantee(max != NULL, "Since _n_fine_entries > 0");

// Set the corresponding coarse bit.
size_t max_hrs_index = (size_t) max->hr()->hrs_index();
if (!_coarse_map.at(max_hrs_index)) {
_coarse_map.at_put(max_hrs_index, true);
size_t max_hrm_index = (size_t) max->hr()->hrm_index();
if (!_coarse_map.at(max_hrm_index)) {
_coarse_map.at_put(max_hrm_index, true);
_n_coarse_entries++;
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
@ -633,7 +633,7 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) {
// First eliminated garbage regions from the coarse map.
if (G1RSScrubVerbose) {
gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
}

assert(_coarse_map.size() == region_bm->size(), "Precondition");
@ -656,9 +656,9 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
// If the entire region is dead, eliminate.
if (G1RSScrubVerbose) {
gclog_or_tty->print_cr("     For other region %u:",
cur->hr()->hrs_index());
cur->hr()->hrm_index());
}
if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
*prev = nxt;
cur->set_collision_list_next(NULL);
_n_fine_entries--;
@ -752,7 +752,7 @@ size_t OtherRegionsTable::fl_mem_size() {
}

void OtherRegionsTable::clear_fcc() {
FromCardCache::clear(hr()->hrs_index());
FromCardCache::clear(hr()->hrm_index());
}

void OtherRegionsTable::clear() {
@ -803,7 +803,7 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
// Is this region in the coarse map?
if (_coarse_map.at(hr_ind)) return true;

@ -840,7 +840,7 @@ uint HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
}
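For orientation, the add_reference path touched above filters at three granularities in order: a per-thread from-card cache (cheap duplicate filter), then the coarse per-region bitmap, and only then the fine and sparse per-card tables. A schematic of that tiering, with deliberately simplified standard containers standing in for HotSpot's structures:

    #include <set>
    #include <unordered_map>
    #include <vector>

    struct TieredRemSet {
      std::unordered_map<int, long> from_card_cache;              // tid -> last card added
      std::vector<bool> coarse = std::vector<bool>(1024, false);  // region -> "remember all"
      std::set<long> fine_cards;                                  // individual card indices

      void add_reference(int tid, int from_region, long from_card) {
        // Tier 1: same thread, same card as last time? Nothing to do.
        auto it = from_card_cache.find(tid);
        if (it != from_card_cache.end() && it->second == from_card) return;
        from_card_cache[tid] = from_card;

        // Tier 2: region already coarsened, so the card is covered implicitly.
        if (coarse[from_region]) return;

        // Tier 3: record the exact card (HotSpot splits this into fine/sparse tables).
        fine_cards.insert(from_card);
      }
    };

When the fine tables overflow, delete_region_table() evicts the largest one and sets its coarse bit instead, trading precision for bounded memory; the coarse-map bookkeeping in the hunks above is maintaining exactly that trade.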
@ -39,11 +39,11 @@ void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {

#ifndef PRODUCT
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index()));
assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions
assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name()));
assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name()));
assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index()));
assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
}
#endif

@ -158,7 +158,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
HeapRegion* curr_from = from_list->_head;

while (curr_from != NULL) {
while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) {
curr_to = curr_to->next();
}

@ -183,7 +183,7 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}
}

if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
if (_tail->hrm_index() < from_list->_tail->hrm_index()) {
_tail = from_list->_tail;
}
}
@ -309,8 +309,8 @@ void FreeRegionList::verify_list() {
if (curr->next() != NULL) {
guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
}
guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted");
last_index = curr->hrs_index();
guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted");
last_index = curr->hrm_index();

capacity += curr->capacity();

@ -319,7 +319,7 @@ void FreeRegionList::verify_list() {
curr = curr->next();
}

guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index()));
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
@ -238,14 +238,14 @@ public:

// Add hr to the list. The region should not be a member of another set.
// Assumes that the list is ordered and will preserve that order. The order
// is determined by hrs_index.
// is determined by hrm_index.
inline void add_ordered(HeapRegion* hr);

// Removes from head or tail based on the given argument.
HeapRegion* remove_region(bool from_head);

// Merge two ordered lists. The result is also ordered. The order is
// determined by hrs_index.
// determined by hrm_index.
void add_ordered(FreeRegionList* from_list);

// It empties the list by removing all regions from it.
@ -60,14 +60,14 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
if (_head != NULL) {
HeapRegion* curr;

if (_last != NULL && _last->hrs_index() < hr->hrs_index()) {
if (_last != NULL && _last->hrm_index() < hr->hrm_index()) {
curr = _last;
} else {
curr = _head;
}

// Find first entry with a Region Index larger than entry to insert.
while (curr != NULL && curr->hrs_index() < hr->hrs_index()) {
while (curr != NULL && curr->hrm_index() < hr->hrm_index()) {
curr = curr->next();
}
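add_ordered above starts its linear scan from the cached _last insertion point whenever the new region sorts after it, so bulk insertions in ascending hrm_index order cost O(1) each instead of rescanning from _head. A compact model of that hint, using an invented singly linked Node type rather than HeapRegion:

    #include <cassert>

    struct Node { unsigned idx; Node* next; };

    struct OrderedList {
      Node* head = nullptr;
      Node* last_hint = nullptr;  // where the previous insert landed

      void add_ordered(Node* n) {
        // Start at the hint when it cannot overshoot, else from the head.
        Node** link = (last_hint != nullptr && last_hint->idx < n->idx)
                      ? &last_hint->next : &head;
        while (*link != nullptr && (*link)->idx < n->idx) {
          link = &(*link)->next;   // advance to the first larger entry
        }
        n->next = *link;           // splice in before it
        *link = n;
        last_hint = n;
      }
    };

    int main() {
      OrderedList l;
      Node a{1, nullptr}, b{5, nullptr}, c{3, nullptr};
      l.add_ordered(&a); l.add_ordered(&b); l.add_ordered(&c);  // yields 1, 3, 5
      assert(l.head == &a && a.next == &c && c.next == &b);
      return 0;
    }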
@ -453,7 +453,7 @@ size_t SparsePRT::mem_size() const {
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr("  Adding card %d from region %d to region %u sparse.",
card_index, region_id, _hr->hrs_index());
card_index, region_id, _hr->hrm_index());
#endif
if (_next->occupied_entries() * 2 > _next->capacity()) {
expand();
@ -505,7 +505,7 @@ void SparsePRT::expand() {

#if SPARSE_PRT_VERBOSE
gclog_or_tty->print_cr("  Expanded sparse table for %u to %d.",
_hr->hrs_index(), _next->capacity());
_hr->hrm_index(), _next->capacity());
#endif
for (size_t i = 0; i < last->capacity(); i++) {
SparsePRTEntry* e = last->entry((int)i);
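The guard in add_card above, occupied_entries() * 2 > capacity(), expands the sparse table once it passes a 50% load factor so probes stay short; the multiply form avoids a division and any rounding question. A minimal illustration of the same policy, using a plain open-addressing toy table rather than SparsePRT:

    #include <cassert>
    #include <vector>

    struct ToyTable {
      size_t occupied = 0;
      std::vector<long> slots = std::vector<long>(8, -1);  // -1 marks an empty slot

      void add(long card) {
        if ((occupied + 1) * 2 > slots.size()) grow();     // stay at or below 50% full
        size_t i = (size_t)card % slots.size();
        while (slots[i] != -1 && slots[i] != card) i = (i + 1) % slots.size();  // probe
        if (slots[i] == -1) { slots[i] = card; occupied++; }
      }

      void grow() {
        std::vector<long> old = slots;
        slots.assign(old.size() * 2, -1);
        occupied = 0;
        for (long c : old) if (c != -1) add(c);            // rehash into the bigger table
      }
    };

    int main() {
      ToyTable t;
      for (long c = 0; c < 100; c++) t.add(c);
      assert(t.occupied == 100 && t.slots.size() >= 200);  // capacity kept ahead of load
      return 0;
    }

Because the table never exceeds half full, the linear probe is guaranteed to find an empty slot, which is the same property the 2x check buys SparsePRT.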
@ -26,7 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP

#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

#define VM_STRUCTS_G1(nonstatic_field, static_field) \
@ -42,10 +42,10 @@
nonstatic_field(G1HeapRegionTable, _bias, size_t) \
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionSeq, _num_committed, uint) \
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
@ -72,7 +72,7 @@
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
declare_toplevel_type(HeapRegionSeq) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
@ -30,6 +30,8 @@

PSGenerationCounters::PSGenerationCounters(const char* name,
int ordinal, int spaces,
size_t min_capacity,
size_t max_capacity,
PSVirtualSpace* v):
_ps_virtual_space(v) {

@ -52,11 +54,11 @@ PSGenerationCounters::PSGenerationCounters(const char* name,

cname = PerfDataManager::counter_name(_name_space, "minCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
_ps_virtual_space->committed_size(), CHECK);
min_capacity, CHECK);

cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
_ps_virtual_space->reserved_size(), CHECK);
max_capacity, CHECK);

cname = PerfDataManager::counter_name(_name_space, "capacity");
_current_size = PerfDataManager::create_variable(SUN_GC, cname,
@ -41,7 +41,7 @@ class PSGenerationCounters: public GenerationCounters {

public:
PSGenerationCounters(const char* name, int ordinal, int spaces,
PSVirtualSpace* v);
size_t min_capacity, size_t max_capacity, PSVirtualSpace* v);

void update_all() {
assert(_virtual_space == NULL, "Only one should be in use");
@ -149,8 +149,8 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
// Generation Counters, generation 'level', 1 subspace
_gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
virtual_space());
_gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
_max_gen_size, virtual_space());
_space_counters = new SpaceCounters(perf_data_name, 0,
virtual_space()->reserved_size(),
_object_space, _gen_counters);
@ -101,7 +101,8 @@ void PSYoungGen::initialize_work() {
}

// Generation Counters - generation 0, 3 subspaces
_gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);
_gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
_max_gen_size, _virtual_space);

// Compute maximum space sizes for performance counters
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@ -62,11 +62,12 @@ void GenerationCounters::initialize(const char* name, int ordinal, int spaces,

GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
VirtualSpace* v)
: _virtual_space(v) {
assert(v != NULL, "don't call this constructor if v == NULL");
initialize(name, ordinal, spaces,
v->committed_size(), v->reserved_size(), v->committed_size());
min_capacity, max_capacity, v->committed_size());
}

GenerationCounters::GenerationCounters(const char* name,
@ -66,7 +66,7 @@ private:

public:
GenerationCounters(const char* name, int ordinal, int spaces,
VirtualSpace* v);
size_t min_capacity, size_t max_capacity, VirtualSpace* v);

~GenerationCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
@ -214,9 +214,11 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_max_eden_size = size - (2*_max_survivor_size);

// allocate the performance counters
GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

// Generation counters -- generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
_gen_counters = new GenerationCounters("new", 0, 3,
gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
_gc_counters = new CollectorCounters(policy, 0);

_eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
@ -53,9 +53,11 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
// initialize performance counters

const char* gen_name = "old";
GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

// Generation Counters -- generation 1, 1 subspace
_gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
_gen_counters = new GenerationCounters(gen_name, 1, 1,
gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

_gc_counters = new CollectorCounters("MSC", 1);
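The counter changes in the last several hunks all follow one rule: minCapacity and maxCapacity are published once as constants, so they must come from the collector policy's true bounds rather than from whatever happened to be committed or reserved at construction time; only the capacity counter stays variable. A rough model of constant-versus-variable monitoring values (the Monitoring type and its maps are invented for the sketch):

    #include <cstddef>
    #include <map>
    #include <string>

    struct Monitoring {
      std::map<std::string, size_t> constants;   // written once, never updated
      std::map<std::string, size_t> variables;   // updated as the generation resizes

      void create_constant(const std::string& n, size_t v) { constants[n] = v; }
      size_t& create_variable(const std::string& n, size_t v) { return variables[n] = v; }
    };

    void init_gen_counters(Monitoring& m, size_t min_capacity, size_t max_capacity,
                           size_t committed_now) {
      // Policy bounds, not the current mapping state: these never change later.
      m.create_constant("minCapacity", min_capacity);
      m.create_constant("maxCapacity", max_capacity);
      // The live value starts at whatever is committed and is updated over time.
      m.create_variable("capacity", committed_now);
    }

Before this change the old constructors effectively did create_constant("minCapacity", committed_now), pinning the reported minimum to a startup artifact; threading min_capacity and max_capacity through explicitly is the fix the hunks above implement.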
@ -1256,8 +1256,8 @@ void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
// Deoptimize all activations depending on marked nmethods
Deoptimization::deoptimize_dependents();

// Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
CodeCache::make_marked_nmethods_not_entrant();
// Make the dependent methods zombies (like VM_Deoptimize)
CodeCache::make_marked_nmethods_zombies();
}
}
#endif // HOTSWAP
@ -68,7 +68,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
@ -460,6 +460,8 @@ objArrayOop InstanceKlass::signers() const {
oop InstanceKlass::init_lock() const {
// return the init lock from the mirror
oop lock = java_lang_Class::init_lock(java_mirror());
// Prevent reordering with any access of initialization state
OrderAccess::loadload();
assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
"only fully initialized state can have a null lock");
return lock;
@ -2437,16 +2439,6 @@ void InstanceKlass::release_C_heap_structures() {
assert(breakpoints() == 0x0, "should have cleared breakpoints");
}

// deallocate information about previous versions
if (_previous_versions != NULL) {
for (int i = _previous_versions->length() - 1; i >= 0; i--) {
PreviousVersionNode * pv_node = _previous_versions->at(i);
delete pv_node;
}
delete _previous_versions;
_previous_versions = NULL;
}

// deallocate the cached class file
if (_cached_class_file != NULL) {
os::free(_cached_class_file, mtClass);
@ -3020,16 +3012,17 @@ void InstanceKlass::print_on(outputStream* st) const {
st->print(BULLET"field type annotations: "); fields_type_annotations()->print_value_on(st); st->cr();
{
bool have_pv = false;
PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
for (PreviousVersionNode * pv_node = pvw.next_previous_version();
pv_node != NULL; pv_node = pvw.next_previous_version()) {
// previous versions are linked together through the InstanceKlass
for (InstanceKlass* pv_node = _previous_versions;
pv_node != NULL;
pv_node = pv_node->previous_versions()) {
if (!have_pv)
st->print(BULLET"previous version: ");
have_pv = true;
pv_node->prev_constant_pool()->print_value_on(st);
pv_node->constants()->print_value_on(st);
}
if (have_pv) st->cr();
} // pvw is cleaned up
}

if (generic_signature() != NULL) {
st->print(BULLET"generic signature: ");
@ -3443,92 +3436,92 @@ void InstanceKlass::set_init_state(ClassState state) {
// RedefineClasses() support for previous versions:

// Purge previous versions
static void purge_previous_versions_internal(InstanceKlass* ik, int emcp_method_count) {
void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
if (ik->previous_versions() != NULL) {
// This klass has previous versions so see what we can cleanup
// while it is safe to do so.

int deleted_count = 0;    // leave debugging breadcrumbs
int live_count = 0;
ClassLoaderData* loader_data = ik->class_loader_data() == NULL ?
ClassLoaderData::the_null_class_loader_data() :
ik->class_loader_data();
ClassLoaderData* loader_data = ik->class_loader_data();
assert(loader_data != NULL, "should never be null");

// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00000200, ("purge: %s: previous version length=%d",
ik->external_name(), ik->previous_versions()->length()));
RC_TRACE(0x00000200, ("purge: %s: previous versions", ik->external_name()));

for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
// check the previous versions array
PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
ConstantPool* cp_ref = pv_node->prev_constant_pool();
assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
// previous versions are linked together through the InstanceKlass
InstanceKlass* pv_node = ik->previous_versions();
InstanceKlass* last = ik;
int version = 0;

// check the previous versions list
for (; pv_node != NULL; ) {

ConstantPool* pvcp = pv_node->constants();
assert(pvcp != NULL, "cp ref was unexpectedly cleared");

ConstantPool* pvcp = cp_ref;
if (!pvcp->on_stack()) {
// If the constant pool isn't on stack, none of the methods
// are executing.  Delete all the methods, the constant pool and
// and this previous version node.
GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
if (method_refs != NULL) {
for (int j = method_refs->length() - 1; j >= 0; j--) {
Method* method = method_refs->at(j);
assert(method != NULL, "method ref was unexpectedly cleared");
method_refs->remove_at(j);
// method will be freed with associated class.
}
}
// Remove the constant pool
delete pv_node;
// Since we are traversing the array backwards, we don't have to
// do anything special with the index.
ik->previous_versions()->remove_at(i);
// are executing. Unlink this previous_version.
// The previous version InstanceKlass is on the ClassLoaderData deallocate list
// so will be deallocated during the next phase of class unloading.
pv_node = pv_node->previous_versions();
last->link_previous_versions(pv_node);
deleted_count++;
version++;
continue;
} else {
RC_TRACE(0x00000200, ("purge: previous version @%d is alive", i));
RC_TRACE(0x00000200, ("purge: previous version " INTPTR_FORMAT " is alive",
pv_node));
assert(pvcp->pool_holder() != NULL, "Constant pool with no holder");
guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack");
live_count++;
}

// At least one method is live in this previous version, clean out
// the others or mark them as obsolete.
GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
// At least one method is live in this previous version so clean its MethodData.
// Reset dead EMCP methods not to get breakpoints.
// All methods are deallocated when all of the methods for this class are no
// longer running.
Array<Method*>* method_refs = pv_node->methods();
if (method_refs != NULL) {
RC_TRACE(0x00000200, ("purge: previous methods length=%d",
method_refs->length()));
for (int j = method_refs->length() - 1; j >= 0; j--) {
for (int j = 0; j < method_refs->length(); j++) {
Method* method = method_refs->at(j);
assert(method != NULL, "method ref was unexpectedly cleared");

// Remove the emcp method if it's not executing
// If it's been made obsolete by a redefinition of a non-emcp
// method, mark it as obsolete but leave it to clean up later.
if (!method->on_stack()) {
method_refs->remove_at(j);
} else if (emcp_method_count == 0) {
method->set_is_obsolete();
// no breakpoints for non-running methods
if (method->is_running_emcp()) {
method->set_running_emcp(false);
}
} else {
assert (method->is_obsolete() || method->is_running_emcp(),
"emcp method cannot run after emcp bit is cleared");
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00000200,
("purge: %s(%s): prev method @%d in version @%d is alive",
method->name()->as_C_string(),
method->signature()->as_C_string(), j, i));
method->signature()->as_C_string(), j, version));
if (method->method_data() != NULL) {
// Clean out any weak method links
// Clean out any weak method links for running methods
// (also should include not EMCP methods)
method->method_data()->clean_weak_method_links();
}
}
}
}
// next previous version
last = pv_node;
pv_node = pv_node->previous_versions();
version++;
}
assert(ik->previous_versions()->length() == live_count, "sanity check");
RC_TRACE(0x00000200,
("purge: previous version stats: live=%d, deleted=%d", live_count,
deleted_count));
}

// Clean MethodData of this class's methods so they don't refer to
// old methods that are no longer running.
Array<Method*>* methods = ik->methods();
int num_methods = methods->length();
for (int index2 = 0; index2 < num_methods; ++index2) {
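The new purge loop replaces the old GrowableArray of PreviousVersionNode with a singly linked chain threaded through InstanceKlass itself; dead versions are removed by rewiring last->link_previous_versions(pv_node) rather than freed in place, since the ClassLoaderData deallocate list owns the memory. A minimal model of that unlink-while-walking pattern (the Version type and its live flag are invented for the illustration):

    #include <cassert>

    struct Version {
      bool live;
      Version* next = nullptr;
      Version* previous_versions() const { return next; }
      void link_previous_versions(Version* v) { next = v; }
    };

    // Walk head's chain, unlinking dead nodes but never deleting them:
    // another owner reclaims the memory later, as in the HotSpot change.
    void purge(Version* head) {
      Version* last = head;                      // last node known to stay linked
      Version* cur = head->previous_versions();
      while (cur != nullptr) {
        if (!cur->live) {
          cur = cur->previous_versions();
          last->link_previous_versions(cur);     // splice the dead node out
        } else {
          last = cur;                            // keep it; advance the anchor
          cur = cur->previous_versions();
        }
      }
    }

    int main() {
      Version d{true}, c{false}, b{true}, a{true};
      a.next = &b; b.next = &c; c.next = &d;     // a -> b -> c(dead) -> d
      purge(&a);
      assert(a.next == &b && b.next == &d);      // c spliced out, d kept
      return 0;
    }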
@ -3538,122 +3531,30 @@ static void purge_previous_versions_internal(InstanceKlass* ik, int emcp_method_
|
||||
}
|
||||
}
|
||||
|
||||
// External interface for use during class unloading.
|
||||
void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
|
||||
// Call with >0 emcp methods since they are not currently being redefined.
|
||||
purge_previous_versions_internal(ik, 1);
|
||||
}
|
||||
|
||||
|
||||
// Potentially add an information node that contains pointers to the
|
||||
// interesting parts of the previous version of the_class.
|
||||
// This is also where we clean out any unused references.
|
||||
// Note that while we delete nodes from the _previous_versions
|
||||
// array, we never delete the array itself until the klass is
|
||||
// unloaded. The has_been_redefined() query depends on that fact.
|
||||
//
|
||||
void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
|
||||
BitMap* emcp_methods, int emcp_method_count) {
|
||||
assert(Thread::current()->is_VM_thread(),
|
||||
"only VMThread can add previous versions");
|
||||
|
||||
if (_previous_versions == NULL) {
|
||||
// This is the first previous version so make some space.
|
||||
// Start with 2 elements under the assumption that the class
|
||||
// won't be redefined much.
|
||||
_previous_versions = new (ResourceObj::C_HEAP, mtClass)
|
||||
GrowableArray<PreviousVersionNode *>(2, true);
|
||||
}
|
||||
|
||||
ConstantPool* cp_ref = ikh->constants();
|
||||
|
||||
// RC_TRACE macro has an embedded ResourceMark
|
||||
RC_TRACE(0x00000400, ("adding previous version ref for %s @%d, EMCP_cnt=%d "
|
||||
"on_stack=%d",
|
||||
ikh->external_name(), _previous_versions->length(), emcp_method_count,
|
||||
cp_ref->on_stack()));
|
||||
|
||||
// If the constant pool for this previous version of the class
|
||||
// is not marked as being on the stack, then none of the methods
|
||||
// in this previous version of the class are on the stack so
|
||||
// we don't need to create a new PreviousVersionNode. However,
|
||||
// we still need to examine older previous versions below.
|
||||
Array<Method*>* old_methods = ikh->methods();
|
||||
|
||||
if (cp_ref->on_stack()) {
|
||||
PreviousVersionNode * pv_node = NULL;
|
||||
if (emcp_method_count == 0) {
|
||||
// non-shared ConstantPool gets a reference
|
||||
pv_node = new PreviousVersionNode(cp_ref, NULL);
|
||||
RC_TRACE(0x00000400,
|
||||
("add: all methods are obsolete; flushing any EMCP refs"));
|
||||
} else {
|
||||
int local_count = 0;
|
||||
GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
|
||||
GrowableArray<Method*>(emcp_method_count, true);
|
||||
for (int i = 0; i < old_methods->length(); i++) {
|
||||
if (emcp_methods->at(i)) {
|
||||
// this old method is EMCP. Save it only if it's on the stack
|
||||
Method* old_method = old_methods->at(i);
|
||||
if (old_method->on_stack()) {
|
||||
method_refs->append(old_method);
|
||||
}
|
||||
if (++local_count >= emcp_method_count) {
|
||||
// no more EMCP methods so bail out now
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// non-shared ConstantPool gets a reference
|
||||
pv_node = new PreviousVersionNode(cp_ref, method_refs);
|
||||
}
|
||||
// append new previous version.
|
||||
_previous_versions->append(pv_node);
|
||||
}
|
||||
|
||||
// Since the caller is the VMThread and we are at a safepoint, this
|
||||
// is a good time to clear out unused references.
|
||||
|
||||
RC_TRACE(0x00000400, ("add: previous version length=%d",
|
||||
_previous_versions->length()));
|
||||
|
||||
// Purge previous versions not executing on the stack
|
||||
purge_previous_versions_internal(this, emcp_method_count);
|
||||
|
||||
void InstanceKlass::mark_newly_obsolete_methods(Array<Method*>* old_methods,
                                                int emcp_method_count) {
  int obsolete_method_count = old_methods->length() - emcp_method_count;

  if (emcp_method_count != 0 && obsolete_method_count != 0 &&
      _previous_versions->length() > 0) {
      _previous_versions != NULL) {
    // We have a mix of obsolete and EMCP methods so we have to
    // clear out any matching EMCP method entries the hard way.
    int local_count = 0;
    for (int i = 0; i < old_methods->length(); i++) {
      if (!emcp_methods->at(i)) {
      Method* old_method = old_methods->at(i);
      if (old_method->is_obsolete()) {
        // only obsolete methods are interesting
        Method* old_method = old_methods->at(i);
        Symbol* m_name = old_method->name();
        Symbol* m_signature = old_method->signature();

        // we might not have added the last entry
        for (int j = _previous_versions->length() - 1; j >= 0; j--) {
          // check the previous versions array for non executing obsolete methods
          PreviousVersionNode * pv_node = _previous_versions->at(j);
        // previous versions are linked together through the InstanceKlass
        int j = 0;
        for (InstanceKlass* prev_version = _previous_versions;
             prev_version != NULL;
             prev_version = prev_version->previous_versions(), j++) {

          GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
          if (method_refs == NULL) {
            // We have run into a PreviousVersion generation where
            // all methods were made obsolete during that generation's
            // RedefineClasses() operation. At the time of that
            // operation, all EMCP methods were flushed so we don't
            // have to go back any further.
            //
            // A NULL method_refs is different than an empty method_refs.
            // We cannot infer any optimizations about older generations
            // from an empty method_refs for the current generation.
            break;
          }

          for (int k = method_refs->length() - 1; k >= 0; k--) {
          Array<Method*>* method_refs = prev_version->methods();
          for (int k = 0; k < method_refs->length(); k++) {
            Method* method = method_refs->at(k);

            if (!method->is_obsolete() &&
@ -3661,14 +3562,11 @@ void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
                method->signature() == m_signature) {
              // The current RedefineClasses() call has made all EMCP
              // versions of this method obsolete so mark it as obsolete
              // and remove the reference.
              RC_TRACE(0x00000400,
                ("add: %s(%s): flush obsolete method @%d in version @%d",
                m_name->as_C_string(), m_signature->as_C_string(), k, j));

              method->set_is_obsolete();
              // Leave obsolete methods on the previous version list to
              // clean up later.
              break;
            }
          }
@ -3676,9 +3574,9 @@ void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
          // The previous loop may not find a matching EMCP method, but
          // that doesn't mean that we can optimize and not go any
          // further back in the PreviousVersion generations. The EMCP
          // method for this generation could have already been deleted,
          // method for this generation could have already been made obsolete,
          // but there still may be an older EMCP method that has not
          // been deleted.
          // been made obsolete.
        }

        if (++local_count >= obsolete_method_count) {
@ -3688,15 +3586,69 @@ void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
        }
      }
    }
  }
}
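// [Editor's sketch, not part of the patch] A minimal, self-contained
// illustration of the matching walk above: newly obsolete methods are
// matched by name and signature against EMCP methods saved in older
// versions, and matches are marked obsolete in place. All types here are
// simplified stand-ins for InstanceKlass/Method/Symbol, not HotSpot code.
#include <string>
#include <vector>

struct MethodRec {
  std::string name;
  std::string signature;
  bool obsolete = false;
};

struct KlassVersion {
  std::vector<MethodRec> methods;
  KlassVersion* previous = nullptr;  // next older version, or null
};

void mark_newly_obsolete(KlassVersion* chain, const std::string& name,
                         const std::string& sig) {
  for (KlassVersion* v = chain; v != nullptr; v = v->previous) {
    for (MethodRec& m : v->methods) {
      if (!m.obsolete && m.name == name && m.signature == sig) {
        m.obsolete = true;  // left on the list; purged later
      }
    }
  }
}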

// Save the scratch_class as the previous version if any of the methods are running.
// The previous_versions are used to set breakpoints in EMCP methods and they are
// also used to clean MethodData links to redefined methods that are no longer running.
void InstanceKlass::add_previous_version(instanceKlassHandle scratch_class,
                                         int emcp_method_count) {
  assert(Thread::current()->is_VM_thread(),
         "only VMThread can add previous versions");

  // RC_TRACE macro has an embedded ResourceMark
  RC_TRACE(0x00000400, ("adding previous version ref for %s, EMCP_cnt=%d",
    scratch_class->external_name(), emcp_method_count));

  // Clean out old previous versions
  purge_previous_versions(this);

  // Mark newly obsolete methods in remaining previous versions.  An EMCP method from
  // a previous redefinition may be made obsolete by this redefinition.
  Array<Method*>* old_methods = scratch_class->methods();
  mark_newly_obsolete_methods(old_methods, emcp_method_count);

  // If the constant pool for this previous version of the class
  // is not marked as being on the stack, then none of the methods
  // in this previous version of the class are on the stack so
  // we don't need to add this as a previous version.
  ConstantPool* cp_ref = scratch_class->constants();
  if (!cp_ref->on_stack()) {
    RC_TRACE(0x00000400, ("add: scratch class not added; no methods are running"));
    return;
  }

  if (emcp_method_count != 0) {
    // At least one method is still running, check for EMCP methods
    for (int i = 0; i < old_methods->length(); i++) {
      Method* old_method = old_methods->at(i);
      if (!old_method->is_obsolete() && old_method->on_stack()) {
        // if EMCP method (not obsolete) is on the stack, mark as EMCP so that
        // we can add breakpoints for it.

        // We set the method->on_stack bit during safepoints for class redefinition and
        // class unloading and use this bit to set the is_running_emcp bit.
        // After the safepoint, the on_stack bit is cleared and the running emcp
        // method may exit.  If so, we would set a breakpoint in a method that
        // is never reached, but this won't be noticeable to the programmer.
        old_method->set_running_emcp(true);
        RC_TRACE(0x00000400, ("add: EMCP method %s is on_stack " INTPTR_FORMAT,
                              old_method->name_and_sig_as_C_string(), old_method));
      } else if (!old_method->is_obsolete()) {
        RC_TRACE(0x00000400, ("add: EMCP method %s is NOT on_stack " INTPTR_FORMAT,
                              old_method->name_and_sig_as_C_string(), old_method));
      }
    }
  }

  // Add previous version if any methods are still running.
  RC_TRACE(0x00000400, ("add: scratch class added; one of its methods is on_stack"));
  assert(scratch_class->previous_versions() == NULL, "shouldn't have a previous version");
  scratch_class->link_previous_versions(previous_versions());
  link_previous_versions(scratch_class());
} // end add_previous_version()
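// [Editor's sketch, not part of the patch] The linking at the end of
// add_previous_version() is a plain head insertion into a singly linked
// list: the scratch class points at the old chain head and then becomes
// the new head. A hedged stand-alone rendering with a stand-in type:
struct Version {
  Version* previous = nullptr;
};

void link_at_head(Version*& chain_head, Version* scratch) {
  // precondition mirrors the assert above: scratch is not yet linked
  scratch->previous = chain_head;
  chain_head = scratch;
}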


// Determine if InstanceKlass has a previous version.
bool InstanceKlass::has_previous_version() const {
  return (_previous_versions != NULL && _previous_versions->length() > 0);
} // end has_previous_version()


Method* InstanceKlass::method_with_idnum(int idnum) {
  Method* m = NULL;
  if (idnum < methods()->length()) {
@ -3722,61 +3674,3 @@ jint InstanceKlass::get_cached_class_file_len() {
unsigned char * InstanceKlass::get_cached_class_file_bytes() {
  return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
}


// Construct a PreviousVersionNode entry for the array hung off
// the InstanceKlass.
PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
    GrowableArray<Method*>* prev_EMCP_methods) {

  _prev_constant_pool = prev_constant_pool;
  _prev_EMCP_methods = prev_EMCP_methods;
}


// Destroy a PreviousVersionNode
PreviousVersionNode::~PreviousVersionNode() {
  if (_prev_constant_pool != NULL) {
    _prev_constant_pool = NULL;
  }

  if (_prev_EMCP_methods != NULL) {
    delete _prev_EMCP_methods;
  }
}

// Construct a helper for walking the previous versions array
PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
  _thread = thread;
  _previous_versions = ik->previous_versions();
  _current_index = 0;
  _current_p = NULL;
  _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
}


// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
  if (_previous_versions == NULL) {
    // no previous versions so nothing to return
    return NULL;
  }

  _current_p = NULL;  // reset to NULL
  _current_constant_pool_handle = NULL;

  int length = _previous_versions->length();

  while (_current_index < length) {
    PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);

    // Save a handle to the constant pool for this previous version,
    // which keeps all the methods from being deallocated.
    _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
    _current_p = pv_node;
    return pv_node;
  }

  return NULL;
} // end next_previous_version()
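// [Editor's sketch, not part of the patch] The walker above is deleted by
// this change; callers now follow the InstanceKlass previous-versions
// chain directly. Roughly, with a simplified stand-in type:
struct IK {
  IK* _prev = nullptr;
  IK* previous_versions() const { return _prev; }
};

int count_previous_versions(const IK* ik) {
  int n = 0;
  for (const IK* pv = ik->previous_versions(); pv != nullptr;
       pv = pv->previous_versions()) {
    n++;
  }
  return n;
}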

@ -59,7 +59,6 @@ class BreakpointInfo;
class fieldDescriptor;
class DepChange;
class nmethodBucket;
class PreviousVersionNode;
class JvmtiCachedClassFieldMap;
class MemberNameTable;

@ -205,7 +204,8 @@ class InstanceKlass: public Klass {
    _misc_should_verify_class = 1 << 2,  // allow caching of preverification
    _misc_is_anonymous        = 1 << 3,  // has embedded _host_klass field
    _misc_is_contended        = 1 << 4,  // marked with contended annotation
    _misc_has_default_methods = 1 << 5   // class/superclass/implemented interfaces has default methods
    _misc_has_default_methods = 1 << 5,  // class/superclass/implemented interfaces has default methods
    _misc_has_been_redefined  = 1 << 6   // class has been redefined
  };
  u2              _misc_flags;
  u2              _minor_version;        // minor version number of class file
@ -220,9 +220,8 @@ class InstanceKlass: public Klass {
  nmethodBucket*  _dependencies;         // list of dependent nmethods
  nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
  BreakpointInfo* _breakpoints;          // bpt lists, managed by Method*
  // Array of interesting part(s) of the previous version(s) of this
  // InstanceKlass. See PreviousVersionWalker below.
  GrowableArray<PreviousVersionNode *>* _previous_versions;
  // Linked instanceKlasses of previous versions
  InstanceKlass* _previous_versions;
  // JVMTI fields can be moved to their own structure - see 6315920
  // JVMTI: cached class file, before retransformable agent modified it in CFLH
  JvmtiCachedClassFileData* _cached_class_file;
@ -608,19 +607,20 @@ class InstanceKlass: public Klass {
  }

  // RedefineClasses() support for previous versions:
  void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods,
         int emcp_method_count);
  // If the _previous_versions array is non-NULL, then this klass
  // has been redefined at least once even if we aren't currently
  // tracking a previous version.
  bool has_been_redefined() const { return _previous_versions != NULL; }
  bool has_previous_version() const;
  void add_previous_version(instanceKlassHandle ikh, int emcp_method_count);

  InstanceKlass* previous_versions() const { return _previous_versions; }

  bool has_been_redefined() const {
    return (_misc_flags & _misc_has_been_redefined) != 0;
  }
  void set_has_been_redefined() {
    _misc_flags |= _misc_has_been_redefined;
  }

  void init_previous_versions() {
    _previous_versions = NULL;
  }
  GrowableArray<PreviousVersionNode *>* previous_versions() const {
    return _previous_versions;
  }

  static void purge_previous_versions(InstanceKlass* ik);

@ -1042,6 +1042,10 @@ private:

  // Free CHeap allocated fields.
  void release_C_heap_structures();

  // RedefineClasses support
  void link_previous_versions(InstanceKlass* pv) { _previous_versions = pv; }
  void mark_newly_obsolete_methods(Array<Method*>* old_methods, int emcp_method_count);
public:
  // CDS support - remove and restore oops from metadata. Oops are not shared.
  virtual void remove_unshareable_info();
|
||||
};
|
||||
|
||||
|
||||
// If breakpoints are more numerous than just JVMTI breakpoints,
|
||||
// consider compressing this data structure.
|
||||
// It is currently a simple linked list defined in method.hpp.
|
||||
|
||||
class BreakpointInfo;
|
||||
|
||||
|
||||
// A collection point for interesting information about the previous
|
||||
// version(s) of an InstanceKlass. A GrowableArray of PreviousVersionNodes
|
||||
// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
|
||||
class PreviousVersionNode : public CHeapObj<mtClass> {
|
||||
private:
|
||||
ConstantPool* _prev_constant_pool;
|
||||
|
||||
// If the previous version of the InstanceKlass doesn't have any
|
||||
// EMCP methods, then _prev_EMCP_methods will be NULL. If all the
|
||||
// EMCP methods have been collected, then _prev_EMCP_methods can
|
||||
// have a length of zero.
|
||||
GrowableArray<Method*>* _prev_EMCP_methods;
|
||||
|
||||
public:
|
||||
PreviousVersionNode(ConstantPool* prev_constant_pool,
|
||||
GrowableArray<Method*>* prev_EMCP_methods);
|
||||
~PreviousVersionNode();
|
||||
ConstantPool* prev_constant_pool() const {
|
||||
return _prev_constant_pool;
|
||||
}
|
||||
GrowableArray<Method*>* prev_EMCP_methods() const {
|
||||
return _prev_EMCP_methods;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// Helper object for walking previous versions.
|
||||
class PreviousVersionWalker : public StackObj {
|
||||
private:
|
||||
Thread* _thread;
|
||||
GrowableArray<PreviousVersionNode *>* _previous_versions;
|
||||
int _current_index;
|
||||
|
||||
// A pointer to the current node object so we can handle the deletes.
|
||||
PreviousVersionNode* _current_p;
|
||||
|
||||
// The constant pool handle keeps all the methods in this class from being
|
||||
// deallocated from the metaspace during class unloading.
|
||||
constantPoolHandle _current_constant_pool_handle;
|
||||
|
||||
public:
|
||||
PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
|
||||
|
||||
// Return the interesting information for the next previous version
|
||||
// of the klass. Returns NULL if there are no more previous versions.
|
||||
PreviousVersionNode* next_previous_version();
|
||||
};
|
||||
|
||||
|
||||
//
|
||||
// nmethodBucket is used to record dependent nmethods for
|
||||
// deoptimization. nmethod dependencies are actually <klass, method>
|
||||
|
@ -42,7 +42,7 @@
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
|
@ -38,7 +38,7 @@
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
|
@ -80,7 +80,8 @@ class Method : public Metadata {
|
||||
_caller_sensitive = 1 << 1,
|
||||
_force_inline = 1 << 2,
|
||||
_dont_inline = 1 << 3,
|
||||
_hidden = 1 << 4
|
||||
_hidden = 1 << 4,
|
||||
_running_emcp = 1 << 5
|
||||
};
|
||||
u1 _flags;
|
||||
|
||||
@ -688,6 +689,21 @@ class Method : public Metadata {
|
||||
void set_is_obsolete() { _access_flags.set_is_obsolete(); }
|
||||
bool is_deleted() const { return access_flags().is_deleted(); }
|
||||
void set_is_deleted() { _access_flags.set_is_deleted(); }
|
||||
|
||||
bool is_running_emcp() const {
|
||||
// EMCP methods are old but not obsolete or deleted. Equivalent
|
||||
// Modulo Constant Pool means the method is equivalent except
|
||||
// the constant pool and instructions that access the constant
|
||||
// pool might be different.
|
||||
// If a breakpoint is set in a redefined method, its EMCP methods that are
|
||||
// still running must have a breakpoint also.
|
||||
return (_flags & _running_emcp) != 0;
|
||||
}
|
||||
|
||||
void set_running_emcp(bool x) {
|
||||
_flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
|
||||
}
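// [Editor's sketch, not part of the patch] set_running_emcp() uses the
// usual one-byte flag idiom: OR the bit in to set it, AND with its
// complement to clear it. A self-contained rendering (names are stand-ins
// for Method::_flags and the _running_emcp enum value):
#include <cstdint>

struct FlagHolder {
  static const uint8_t RUNNING_EMCP = 1 << 5;
  uint8_t _flags = 0;

  bool is_running_emcp() const { return (_flags & RUNNING_EMCP) != 0; }
  void set_running_emcp(bool x) {
    _flags = x ? uint8_t(_flags | RUNNING_EMCP)
               : uint8_t(_flags & ~RUNNING_EMCP);
  }
};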

  bool on_stack() const    { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

@ -51,7 +51,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"

@ -1708,8 +1708,8 @@ const TypeTuple *TypeTuple::LONG_CC_PAIR;
// Make a TypeTuple from the range of a method signature
const TypeTuple *TypeTuple::make_range(ciSignature* sig) {
  ciType* return_type = sig->return_type();
  uint total_fields = TypeFunc::Parms + return_type->size();
  const Type **field_array = fields(total_fields);
  uint arg_cnt = return_type->size();
  const Type **field_array = fields(arg_cnt);
  switch (return_type->basic_type()) {
  case T_LONG:
    field_array[TypeFunc::Parms] = TypeLong::LONG;
@ -1734,26 +1734,26 @@ const TypeTuple *TypeTuple::make_range(ciSignature* sig) {
  default:
    ShouldNotReachHere();
  }
  return (TypeTuple*)(new TypeTuple(total_fields,field_array))->hashcons();
  return (TypeTuple*)(new TypeTuple(TypeFunc::Parms + arg_cnt, field_array))->hashcons();
}

// Make a TypeTuple from the domain of a method signature
const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig) {
  uint total_fields = TypeFunc::Parms + sig->size();
  uint arg_cnt = sig->size();

  uint pos = TypeFunc::Parms;
  const Type **field_array;
  if (recv != NULL) {
    total_fields++;
    field_array = fields(total_fields);
    arg_cnt++;
    field_array = fields(arg_cnt);
    // Use get_const_type here because it respects UseUniqueSubclasses:
    field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
  } else {
    field_array = fields(total_fields);
    field_array = fields(arg_cnt);
  }

  int i = 0;
  while (pos < total_fields) {
  while (pos < TypeFunc::Parms + arg_cnt) {
    ciType* type = sig->type_at(i);

    switch (type->basic_type()) {
@ -1780,7 +1780,8 @@ const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig)
    }
    i++;
  }
  return (TypeTuple*)(new TypeTuple(total_fields,field_array))->hashcons();

  return (TypeTuple*)(new TypeTuple(TypeFunc::Parms + arg_cnt, field_array))->hashcons();
}
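// [Editor's sketch, not part of the patch] The refactor above sizes the
// tuple as TypeFunc::Parms fixed slots (Control, I_O, Memory, FramePtr,
// ReturnAdr) plus arg_cnt argument slots, with the receiver taking one
// argument slot when present. Illustrative arithmetic only; PARMS is a
// stand-in for TypeFunc::Parms.
const unsigned PARMS = 5;  // stand-in for TypeFunc::Parms

unsigned total_fields(unsigned arg_cnt, bool has_receiver) {
  if (has_receiver) {
    arg_cnt++;             // receiver occupies one argument slot
  }
  return PARMS + arg_cnt;  // fixed prefix + arguments
}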

const TypeTuple *TypeTuple::make( uint cnt, const Type **fields ) {
@ -1789,6 +1790,7 @@ const TypeTuple *TypeTuple::make( uint cnt, const Type **fields ) {

//------------------------------fields-----------------------------------------
// Subroutine call type with space allocated for argument types
// Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly
const Type **TypeTuple::fields( uint arg_cnt ) {
  const Type **flds = (const Type **)(Compile::current()->type_arena()->Amalloc_4((TypeFunc::Parms+arg_cnt)*sizeof(Type*) ));
  flds[TypeFunc::Control  ] = Type::CONTROL;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -635,6 +635,7 @@ public:
  static const TypeTuple *make_domain(ciInstanceKlass* recv, ciSignature *sig);

  // Subroutine call type with space allocated for argument types
  // Memory for Control, I_O, Memory, FramePtr, and ReturnAdr is allocated implicitly
  static const Type **fields( uint arg_cnt );

  virtual const Type *xmeet( const Type *t ) const;

@ -282,39 +282,22 @@ address JvmtiBreakpoint::getBcp() {
void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that
  // are EMCP. Directly or transitively obsolete methods are
  // not saved in the PreviousVersionNodes.
  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
       pv_node != NULL; pv_node = pvw.next_previous_version()) {
    GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();

    if (methods == NULL) {
      // We have run into a PreviousVersion generation where
      // all methods were made obsolete during that generation's
      // RedefineClasses() operation. At the time of that
      // operation, all EMCP methods were flushed so we don't
      // have to go back any further.
      //
      // A NULL methods array is different than an empty methods
      // array. We cannot infer any optimizations about older
      // generations from an empty methods array for the current
      // generation.
      break;
    }
  for (InstanceKlass* pv_node = ikh->previous_versions();
       pv_node != NULL;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // obsolete methods that are running are not deleted from
      // previous version array, but they are skipped here.
      if (!method->is_obsolete() &&
      // Only set breakpoints in running EMCP methods.
      if (method->is_running_emcp() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
@ -135,7 +135,7 @@ void VM_RedefineClasses::doit() {

  // Mark methods seen on stack and everywhere else so old methods are not
  // cleaned up if they're on the stack.
  MetadataOnStackMark md_on_stack;
  MetadataOnStackMark md_on_stack(true);
  HandleMark hm(thread);   // make sure any handles created are deleted
                           // before the stack walk again.

@ -2826,11 +2826,10 @@ void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
    }

    // the previous versions' constant pool caches may need adjustment
    PreviousVersionWalker pvw(_thread, ik);
    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
         pv_node != NULL; pv_node = pvw.next_previous_version()) {
      other_cp = pv_node->prev_constant_pool();
      cp_cache = other_cp->cache();
    for (InstanceKlass* pv_node = ik->previous_versions();
         pv_node != NULL;
         pv_node = pv_node->previous_versions()) {
      cp_cache = pv_node->constants()->cache();
      if (cp_cache != NULL) {
        cp_cache->adjust_method_entries(_matching_old_methods,
                                        _matching_new_methods,
@ -2855,9 +2854,8 @@ void VM_RedefineClasses::update_jmethod_ids() {
  }
}

void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
       BitMap *emcp_methods, int * emcp_method_count_p) {
  *emcp_method_count_p = 0;
int VM_RedefineClasses::check_methods_and_mark_as_obsolete() {
  int emcp_method_count = 0;
  int obsolete_count = 0;
  int old_index = 0;
  for (int j = 0; j < _matching_methods_length; ++j, ++old_index) {
@ -2931,9 +2929,9 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
      // that we get from effectively overwriting the old methods
      // when the new methods are attached to the_class.

      // track which methods are EMCP for add_previous_version() call
      emcp_methods->set_bit(old_index);
      (*emcp_method_count_p)++;
      // Count number of methods that are EMCP.  The method will be marked
      // old but not obsolete if it is EMCP.
      emcp_method_count++;

      // An EMCP method is _not_ obsolete. An obsolete method has a
      // different jmethodID than the current method. An EMCP method
@ -2982,10 +2980,11 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
        old_method->name()->as_C_string(),
        old_method->signature()->as_C_string()));
    }
  assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(),
  assert((emcp_method_count + obsolete_count) == _old_methods->length(),
    "sanity check");
  RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p,
  RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", emcp_method_count,
    obsolete_count));
  return emcp_method_count;
}

// This internal class transfers the native function registration from old methods
@ -3379,11 +3378,8 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
  old_constants->set_pool_holder(scratch_class());
#endif

  // track which methods are EMCP for add_previous_version() call below
  BitMap emcp_methods(_old_methods->length());
  int emcp_method_count = 0;
  emcp_methods.clear();  // clears 0..(length() - 1)
  check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count);
  // track number of methods that are EMCP for add_previous_version() call below
  int emcp_method_count = check_methods_and_mark_as_obsolete();
  transfer_old_native_function_registrations(the_class);

  // The class file bytes from before any retransformable agents mucked
@ -3471,9 +3467,10 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
    scratch_class->enclosing_method_method_index());
  scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx);

  the_class->set_has_been_redefined();

  // keep track of previous versions of this class
  the_class->add_previous_version(scratch_class, &emcp_methods,
    emcp_method_count);
  the_class->add_previous_version(scratch_class, emcp_method_count);

  RC_TIMER_STOP(_timer_rsc_phase1);
  RC_TIMER_START(_timer_rsc_phase2);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -403,14 +403,9 @@ class VM_RedefineClasses: public VM_Operation {
  // Change jmethodIDs to point to the new methods
  void update_jmethod_ids();

  // In addition to marking methods as obsolete, this routine
  // records which methods are EMCP (Equivalent Modulo Constant
  // Pool) in the emcp_methods BitMap and returns the number of
  // EMCP methods via emcp_method_count_p. This information is
  // used when information about the previous version of the_class
  // is squirreled away.
  void check_methods_and_mark_as_obsolete(BitMap *emcp_methods,
                                          int * emcp_method_count_p);
  // In addition to marking methods as old and/or obsolete, this routine
  // counts the number of methods that are EMCP (Equivalent Modulo Constant Pool).
  int check_methods_and_mark_as_obsolete();
  void transfer_old_native_function_registrations(instanceKlassHandle the_class);

  // Install the redefinition of a class
@ -557,6 +557,16 @@ class os: AllStatic {
  // Unload library
  static void dll_unload(void *lib);

  // Callback for loaded module information
  // Input parameters:
  //    char*     module_file_name,
  //    address   module_base_addr,
  //    address   module_top_addr,
  //    void*     param
  typedef int (*LoadedModulesCallbackFunc)(const char *, address, address, void *);

  static int get_loaded_modules_info(LoadedModulesCallbackFunc callback, void *param);

  // Return the handle of this process
  static void* get_default_process_handle();
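// [Editor's sketch, not part of the patch] A callback matching the
// LoadedModulesCallbackFunc typedef above; "address" is HotSpot's own
// typedef, so a local stand-in is declared to keep the snippet
// self-contained. The continue/stop meaning of the return value is left
// to the platform implementations, so returning 0 here is an assumption.
#include <cstdio>

typedef unsigned char* address;  // stand-in for HotSpot's address typedef
typedef int (*LoadedModulesCallbackFunc)(const char*, address, address, void*);

static int print_module(const char* name, address base, address top,
                        void* param) {
  int* count = static_cast<int*>(param);   // caller passes a counter as param
  std::printf("%s [%p, %p)\n", name, (void*)base, (void*)top);
  (*count)++;
  return 0;
}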

@ -35,7 +35,9 @@ import java.net.URLConnection;
 * @summary "Tests unloading of anonymous classes."
 * @library /testlibrary /testlibrary/whitebox
 * @compile TestAnonymousClassUnloading.java
 * @run main ClassFileInstaller TestAnonymousClassUnloading sun.hotspot.WhiteBox
 * @run main ClassFileInstaller TestAnonymousClassUnloading
 *                              sun.hotspot.WhiteBox
 *                              sun.hotspot.WhiteBox$WhiteBoxPermission
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
 */
public class TestAnonymousClassUnloading {

@ -36,7 +36,7 @@ import java.net.URLClassLoader;
 * @build WorkerClass
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 *                              sun.hotspot.WhiteBox$WhiteBoxPermission
 * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:+UseParallelGC -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
 * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
 */
public class TestMethodUnloading {
    private static final String workerClassName = "WorkerClass";

@ -54,16 +54,19 @@ public class TestPrintPreciseRTMLockingStatisticsOptionOnSupportedConfig
        CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
                TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
                CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking");

        CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
                TestPrintPreciseRTMLockingStatisticsBase.DEFAULT_VALUE,
                CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:-UseRTMLocking", prepareOptionValue("true"));

        // verify that option could be turned on
        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
                CommandLineOptionTest.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking", prepareOptionValue("true"));
    }

@ -63,13 +63,16 @@ public class TestUseRTMDeoptOptionOnSupportedConfig
        // verify default value
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
                TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking");
        // verify that option is off when UseRTMLocking is off
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
                "false", "-XX:-UseRTMLocking", "-XX:+UseRTMDeopt");
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", "false",
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:-UseRTMLocking", "-XX:+UseRTMDeopt");
        // verify that option could be turned on
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
                "true", "-XX:+UseRTMLocking", "-XX:+UseRTMDeopt");
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt", "true",
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking", "-XX:+UseRTMDeopt");
    }

    public static void main(String args[]) throws Throwable {

@ -59,24 +59,31 @@ public class TestUseRTMLockingOptionOnSupportedConfig
                new String[]{
                        RTMGenericCommandLineOptionTest.RTM_INSTR_ERROR,
                        unrecongnizedOption
                }, ExitCode.OK, "-XX:+UseRTMLocking"
                }, ExitCode.OK,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking"
        );

        CommandLineOptionTest.verifySameJVMStartup(null,
                new String[]{
                        RTMGenericCommandLineOptionTest.RTM_INSTR_ERROR,
                        unrecongnizedOption
                }, ExitCode.OK, "-XX:-UseRTMLocking"
                }, ExitCode.OK,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:-UseRTMLocking"
        );
        // verify that UseRTMLocking is off by default
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
                TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE);
                TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
        // verify that we can change UseRTMLocking value
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
                TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:-UseRTMLocking");
        CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
                "true", "-XX:+UseRTMLocking");
                "true", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking");
    }

    public static void main(String args[]) throws Throwable {
@ -54,18 +54,22 @@ public class TestUseRTMLockingOptionWithBiasedLocking
        // verify that we will not get a warning
        CommandLineOptionTest.verifySameJVMStartup(null,
                new String[] { warningMessage }, ExitCode.OK,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking", "-XX:-UseBiasedLocking");
        // verify that we will get a warning
        CommandLineOptionTest.verifySameJVMStartup(
                new String[] { warningMessage }, null, ExitCode.OK,
                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
        // verify that UseBiasedLocking is false when we use rtm locking
        CommandLineOptionTest.verifyOptionValueForSameVM("UseBiasedLocking",
                "false", "-XX:+UseRTMLocking");
                "false", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking");
        // verify that we can't turn on biased locking when
        // using rtm locking
        CommandLineOptionTest.verifyOptionValueForSameVM("UseBiasedLocking",
                "false", "-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
                "false", CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                "-XX:+UseRTMLocking", "-XX:+UseBiasedLocking");
    }

    public static void main(String args[]) throws Throwable {

@ -22,9 +22,8 @@
 */

/**
 * @ignore 8041506, 8041946, 8042051
 * @test TestHumongousShrinkHeap
 * @bug 8036025
 * @bug 8036025 8056043
 * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
 * @library /testlibrary
 * @run main/othervm -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=50 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc TestHumongousShrinkHeap

@ -129,8 +129,19 @@ class TestStringDeduplicationTools {
        return list;
    }

    /**
     * Verifies that the given list contains expected number of unique strings.
     * It's possible that deduplication hasn't completed yet, so the method
     * will perform several attempts to check with a little pause between.
     * The method throws RuntimeException to signal that verification failed.
     *
     * @param list strings to check
     * @param uniqueExpected expected number of unique strings
     * @throws RuntimeException if check fails
     */
    private static void verifyStrings(ArrayList<String> list, int uniqueExpected) {
        for (;;) {
        boolean passed = false;
        for (int attempts = 0; attempts < 10; attempts++) {
            // Check number of deduplicated strings
            ArrayList<Object> unique = new ArrayList<Object>(uniqueExpected);
            for (String string: list) {
@ -153,11 +164,11 @@ class TestStringDeduplicationTools {
                               ", uniqueExpected=" + uniqueExpected);

            if (unique.size() == uniqueExpected) {
                System.out.println("Deduplication completed");
                System.out.println("Deduplication completed (as fast as " + attempts + " iterations)");
                passed = true;
                break;
            } else {
                System.out.println("Deduplication not completed, waiting...");

                // Give the deduplication thread time to complete
                try {
                    Thread.sleep(1000);
@ -166,6 +177,9 @@ class TestStringDeduplicationTools {
                }
            }
        }
        if (!passed) {
            throw new RuntimeException("String verification failed");
        }
    }

    private static OutputAnalyzer runTest(String... extraArgs) throws Exception {
@ -247,14 +261,20 @@ class TestStringDeduplicationTools {
        forceDeduplication(ageThreshold, FullGC);

        // Wait for deduplication to occur
        while (getValue(dupString1) != getValue(baseString)) {
        for (int attempts = 0; attempts < 10; attempts++) {
            if (getValue(dupString1) == getValue(baseString)) {
                break;
            }
            System.out.println("Waiting...");
            try {
                Thread.sleep(100);
                Thread.sleep(1000);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        if (getValue(dupString1) != getValue(baseString)) {
            throw new RuntimeException("Deduplication has not occurred");
        }

        // Create a new duplicate of baseString
        StringBuilder sb2 = new StringBuilder(baseString);
hotspot/test/gc/whitebox/TestWBGC.java (new file)
@ -0,0 +1,69 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test TestWBGC
 * @bug 8055098
 * @summary Test verifies that the WB methods isObjectInOldGen and youngGC work correctly.
 * @library /testlibrary /testlibrary/whitebox
 * @build TestWBGC
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run driver TestWBGC
 */
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;

public class TestWBGC {

    public static void main(String args[]) throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                true,
                "-Xbootclasspath/a:.",
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:+WhiteBoxAPI",
                "-XX:MaxTenuringThreshold=1",
                "-XX:+PrintGC",
                GCYoungTest.class.getName());

        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        System.out.println(output.getStdout());
        output.shouldHaveExitValue(0);
        output.shouldContain("WhiteBox Initiated Young GC");
        output.shouldNotContain("Full");
        // Make sure that we don't provoke a Full GC in addition to the young GC.
    }

    public static class GCYoungTest {
        static WhiteBox wb = WhiteBox.getWhiteBox();
        public static Object obj;

        public static void main(String args[]) {
            obj = new Object();
            Asserts.assertFalse(wb.isObjectInOldGen(obj));
            wb.youngGC();
            wb.youngGC();
            // 2 young GCs are needed to promote the object into the old gen.
            Asserts.assertTrue(wb.isObjectInOldGen(obj));
        }
    }
}
hotspot/test/runtime/RedefineTests/RedefineRunningMethods.java (new file)
@ -0,0 +1,143 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8055008
 * @summary Redefine EMCP and non-EMCP methods that are running in an infinite loop
 * @library /testlibrary
 * @build RedefineClassHelper
 * @run main RedefineClassHelper
 * @run main/othervm -javaagent:redefineagent.jar -XX:TraceRedefineClasses=0x600 RedefineRunningMethods
 */
public class RedefineRunningMethods {

    public static String newB =
        "class RedefineRunningMethods$B {" +
        "   static int count1 = 0;" +
        "   static int count2 = 0;" +
        "   public static volatile boolean stop = false;" +
        "  static void localSleep() { " +
        "    try{ " +
        "      Thread.currentThread().sleep(10);" +
        "    } catch(InterruptedException ie) { " +
        "    } " +
        " } " +
        "   public static void infinite() { " +
        "       System.out.println(\"infinite called\");" +
        "   }" +
        "   public static void infinite_emcp() { " +
        "       while (!stop) { count2++; localSleep(); }" +
        "   }" +
        "}";

    public static String evenNewerB =
        "class RedefineRunningMethods$B {" +
        "   static int count1 = 0;" +
        "   static int count2 = 0;" +
        "   public static volatile boolean stop = false;" +
        "  static void localSleep() { " +
        "    try{ " +
        "      Thread.currentThread().sleep(1);" +
        "    } catch(InterruptedException ie) { " +
        "    } " +
        " } " +
        "   public static void infinite() { }" +
        "   public static void infinite_emcp() { " +
        "       System.out.println(\"infinite_emcp now obsolete called\");" +
        "   }" +
        "}";

    static class B {
        static int count1 = 0;
        static int count2 = 0;
        public static volatile boolean stop = false;
        static void localSleep() {
            try{
                Thread.currentThread().sleep(10);//sleep for 10 ms
            } catch(InterruptedException ie) {
            }
        }

        public static void infinite() {
            while (!stop) { count1++; localSleep(); }
        }
        public static void infinite_emcp() {
            while (!stop) { count2++; localSleep(); }
        }
    }


    public static void main(String[] args) throws Exception {

        new Thread() {
            public void run() {
                B.infinite();
            }
        }.start();

        new Thread() {
            public void run() {
                B.infinite_emcp();
            }
        }.start();

        RedefineClassHelper.redefineClass(B.class, newB);

        System.gc();

        B.infinite();

        // Start a thread with the second version of infinite_emcp running
        new Thread() {
            public void run() {
                B.infinite_emcp();
            }
        }.start();

        for (int i = 0; i < 20 ; i++) {
            String s = new String("some garbage");
            System.gc();
        }

        RedefineClassHelper.redefineClass(B.class, evenNewerB);
        System.gc();

        for (int i = 0; i < 20 ; i++) {
            B.infinite();
            String s = new String("some garbage");
            System.gc();
        }

        B.infinite_emcp();

        // purge should clean everything up.
        B.stop = true;

        for (int i = 0; i < 20 ; i++) {
            B.infinite();
            String s = new String("some garbage");
            System.gc();
        }
    }
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@ public class Platform {
    private static final String osName      = System.getProperty("os.name");
    private static final String dataModel   = System.getProperty("sun.arch.data.model");
    private static final String vmVersion   = System.getProperty("java.vm.version");
    private static final String javaVersion = System.getProperty("java.version");
    private static final String osArch      = System.getProperty("os.arch");
    private static final String vmName      = System.getProperty("java.vm.name");

@ -83,7 +84,8 @@ public class Platform {
    }

    public static boolean isDebugBuild() {
        return vmVersion.toLowerCase().contains("debug");
        return (vmVersion.toLowerCase().contains("debug") ||
                javaVersion.toLowerCase().contains("debug"));
    }

    public static String getVMVersion() {