Commit 73b6b2c757 by J. Duke, 2017-07-05 19:50:57 +02:00
672 changed files with 15190 additions and 10012 deletions
Changed paths include .hgtags-top-repo, common/autoconf, and hotspot (.hgtags, agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1, make, src/cpu, src/os, src/share/vm).

@ -265,3 +265,4 @@ cf22a728521f91a4692b433d39d730a0a1b23155 jdk9-b16
ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
9052803f4d01feda28b3d65f2b64dd457d21c7b6 jdk9-b21
8e4bdab4c362aadde2d321f968cd503a2f779e2f jdk9-b22
88567461a2cd9b7fb431fee6440005a694df1f47 jdk9-b23

@ -407,11 +407,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
fi
if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then
C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG"
else
C_O_FLAG_DEBUG="-O0"
fi
C_O_FLAG_DEBUG="-O0"
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then

@ -4311,7 +4311,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1403557683
DATE_WHEN_GENERATED=1404942241
###############################################################################
#
@ -42011,11 +42011,7 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
C_O_FLAG_HI="-O3"
C_O_FLAG_NORM="-O2"
fi
if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then
C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG"
else
C_O_FLAG_DEBUG="-O0"
fi
C_O_FLAG_DEBUG="-O0"
C_O_FLAG_NONE="-O0"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then

@ -425,3 +425,4 @@ d4cffb3ae6213c66c7522ebffe0349360a45f0ef jdk9-b19
c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
17b4a5e831b398738feedb0afe75245744510153 jdk9-b21
518d1fcc0799494f013e00e0a94a91b6f212d54f jdk9-b22
dd472cdacc32e3afc7c5bfa7ef16ea0e0befb7fa jdk9-b23

@ -24,23 +24,26 @@
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.ArrayList;
import java.util.List;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.memory.ContiguousSpace;
import sun.jvm.hotspot.memory.CompactibleSpace;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegion. Currently we don't actually include
// any of its fields but only iterate over it (which we get "for free"
// as HeapRegion ultimately inherits from ContiguousSpace).
// any of its fields but only iterate over it.
public class HeapRegion extends ContiguousSpace {
public class HeapRegion extends CompactibleSpace {
// static int GrainBytes;
static private CIntegerField grainBytesField;
static private AddressField topField;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -54,6 +57,8 @@ public class HeapRegion extends ContiguousSpace {
Type type = db.lookupType("HeapRegion");
grainBytesField = type.getCIntegerField("GrainBytes");
topField = type.getAddressField("_top");
}
static public long grainBytes() {
@ -63,4 +68,25 @@ public class HeapRegion extends ContiguousSpace {
public HeapRegion(Address addr) {
super(addr);
}
public Address top() {
return topField.getValue(addr);
}
@Override
public List getLiveRegions() {
List res = new ArrayList();
res.add(new MemRegion(bottom(), top()));
return res;
}
@Override
public long used() {
return top().minus(bottom());
}
@Override
public long free() {
return end().minus(top());
}
}
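Note: the new top()/used()/free() overrides above are plain pointer arithmetic over the region bounds. A minimal C++ sketch of the invariant they rely on (hypothetical stand-in type, not the actual HotSpot HeapRegion):
#include <cstddef>
// Hypothetical region with the usual bottom <= top <= end layout.
struct RegionSketch {
  char* bottom;   // first byte of the region
  char* top;      // allocation pointer: next free byte
  char* end;      // one past the last byte of the region
  size_t used() const { return (size_t)(top - bottom); }  // allocated part
  size_t free() const { return (size_t)(end - top); }     // remaining part
};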

@ -280,16 +280,7 @@ endif
# optimization control flags (Used by fastdebug and release variants)
OPT_CFLAGS/NOOPT=-O0
ifeq ($(USE_CLANG), true)
# Clang does not support -Og
OPT_CFLAGS/DEBUG=-O0
else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
OPT_CFLAGS/DEBUG=-Og
else
# Allow no optimizations.
OPT_CFLAGS/DEBUG=-O0
endif
OPT_CFLAGS/DEBUG=-O0
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3
@ -457,16 +448,8 @@ ifeq ($(USE_CLANG), true)
CFLAGS += -flimit-debug-info
endif
ifeq ($(USE_CLANG), true)
# Clang does not support -Og
DEBUG_CFLAGS=-O0
else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
DEBUG_CFLAGS=-Og
else
# Allow no optimizations.
DEBUG_CFLAGS=-O0
endif
# Allow no optimizations.
DEBUG_CFLAGS=-O0
# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)

@ -93,6 +93,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
ageTable.cpp \
collectorCounters.cpp \
cSpaceCounters.cpp \
gcId.cpp \
gcPolicyCounters.cpp \
gcStats.cpp \
gcTimer.cpp \

@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -67,8 +67,12 @@ ifndef CC_INTERP
endif
endif
# C1 is not ported on ppc64, so we cannot build a tiered VM:
ifeq ($(ARCH),ppc64)
FORCE_TIERED=0
# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
ifneq (,$(findstring $(ARCH), ppc ppc64))
ifeq ($(ARCH_DATA_MODEL), 64)
FORCE_TIERED=0
endif
endif
ifdef LP64

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -108,7 +108,9 @@ ifeq ($(ARCH), arm)
endif
# PPC
ifneq (,$(findstring $(ARCH), ppc))
# Notice: after 8046471 ARCH will be 'ppc' for top-level ppc64 builds but
# 'ppc64' for HotSpot-only ppc64 builds. Need to detect both variants here!
ifneq (,$(findstring $(ARCH), ppc ppc64))
ifeq ($(ARCH_DATA_MODEL), 64)
MAKE_ARGS += LP64=1
PLATFORM = linux-ppc64

@ -40,7 +40,14 @@ else
ifneq ($(ALT_SDT_H),)
SDT_H_FILE = $(ALT_SDT_H)
else
SDT_H_FILE = /usr/include/sys/sdt.h
ifeq ($(USE_CLANG), true)
# Clang doesn't support the -print-sysroot option and there is no known equivalent
# option, so fall back to using / as sysroot
SDT_SYSROOT=
else
SDT_SYSROOT=$(shell $(CXX) -print-sysroot)
endif
SDT_H_FILE = $(SDT_SYSROOT)/usr/include/sys/sdt.h
endif
DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE))

@ -231,13 +231,7 @@ CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# optimization control flags (Used by fastdebug and release variants)
OPT_CFLAGS/NOOPT=-O0
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
OPT_CFLAGS/DEBUG=-Og
else
# Allow no optimizations.
OPT_CFLAGS/DEBUG=-O0
endif
OPT_CFLAGS/DEBUG=-O0
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3
@ -344,13 +338,8 @@ ifeq ($(USE_CLANG), true)
CFLAGS += -flimit-debug-info
endif
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
DEBUG_CFLAGS=-Og
else
# Allow no optimizations.
DEBUG_CFLAGS=-O0
endif
# Allow no optimizations.
DEBUG_CFLAGS=-O0
# DEBUG_BINARIES uses full -g debug information for all configs
ifeq ($(DEBUG_BINARIES), true)

@ -127,13 +127,7 @@ CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# optimization control flags (Used by fastdebug and release variants)
OPT_CFLAGS/NOOPT=-O0
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
OPT_CFLAGS/DEBUG=-Og
else
# Allow no optimizations.
OPT_CFLAGS/DEBUG=-O0
endif
OPT_CFLAGS/DEBUG=-O0
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3
@ -229,14 +223,8 @@ SHARED_FLAG = -shared
#------------------------------------------------------------------------
# Debug flags
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1"
# Allow basic optimizations which don't disrupt debugging. (Principally dead code elimination)
DEBUG_CFLAGS=-Og
else
# Allow no optimizations.
DEBUG_CFLAGS=-O0
endif
# Allow no optimizations.
DEBUG_CFLAGS=-O0
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line

@ -32,12 +32,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "vm_version_ppc.hpp"
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
# include <sys/sysinfo.h>

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

@ -28,12 +28,6 @@
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";

@ -33,7 +33,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"

@ -29,18 +29,6 @@
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
int VM_Version::_cpu;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,11 +29,5 @@
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_zero.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// This file is intentionally empty

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"

@ -42,6 +42,7 @@
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
@ -2807,12 +2808,10 @@ bool os::dont_yield() {
return DontYieldALot;
}
void os::yield() {
void os::naked_yield() {
sched_yield();
}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@ -3069,7 +3068,7 @@ static bool do_suspend(OSThread* osthread) {
for (int n = 0; !osthread->sr.is_suspended(); n++) {
for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
os::yield();
os::naked_yield();
}
// timeout, try to cancel the request
@ -3103,7 +3102,7 @@ static void do_resume(OSThread* osthread) {
if (sr_notify(osthread) == 0) {
for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
os::yield();
os::naked_yield();
}
}
} else {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,8 +26,6 @@
#ifndef OS_AIX_VM_OS_AIX_INLINE_HPP
#define OS_AIX_VM_OS_AIX_INLINE_HPP
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
// System includes
@ -45,18 +43,6 @@ inline void* os::thread_local_storage_at(int index) {
return pthread_getspecific((pthread_key_t)index);
}
inline const char* os::file_separator() {
return "/";
}
inline const char* os::line_separator() {
return "\n";
}
inline const char* os::path_separator() {
return ":";
}
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"

@ -36,6 +36,7 @@
#include "memory/filemap.hpp"
#include "mutex_bsd.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_bsd.inline.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@ -1171,10 +1172,6 @@ void os::die() {
::abort();
}
// unused on bsd for now.
void os::set_error_file(const char *logfile) {}
// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c
@ -1831,6 +1828,7 @@ void os::jvm_path(char *buf, jint buflen) {
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
assert(len < buflen, "Ran out of buffer space");
jrelib_p = buf + len;
// Add the appropriate library subdir
@ -1864,7 +1862,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
}
strcpy(saved_jvm_path, buf);
strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@ -2595,12 +2593,10 @@ bool os::dont_yield() {
return DontYieldALot;
}
void os::yield() {
void os::naked_yield() {
sched_yield();
}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@ -4217,22 +4213,12 @@ static struct timespec* compute_abstime(struct timespec* abstime, jlong millis)
return abstime;
}
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
void os::PlatformEvent::park() { // AKA "down()"
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
assert(_nParked == 0, "invariant");
int v;
for (;;) {
v = _Event;
@ -4332,8 +4318,7 @@ void os::PlatformEvent::unpark() {
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -4540,10 +4525,9 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status;
status = pthread_mutex_lock(_mutex);
int status = pthread_mutex_lock(_mutex);
assert(status == 0, "invariant");
s = _counter;
const int s = _counter;
_counter = 1;
if (s < 1) {
if (WorkAroundNPTLTimedWaitHang) {

@ -219,7 +219,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
int fired() { return _Event; }
void park();
void unpark();
int TryPark();
int park(jlong millis);
void SetAssociation(Thread * a) { _Assoc = a; }
};
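Note: the Parker::unpark() rewrites in this commit (BSD above, Linux and Solaris below) all implement the same one-permit handoff. A simplified pthreads sketch under assumed names; the real code additionally honors WorkAroundNPTLTimedWaitHang when deciding whether to signal before or after unlocking:
#include <pthread.h>
// Hypothetical, simplified Parker: counter holds at most one permit.
struct ParkerSketch {
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
  int counter = 0;                  // 0: no permit, 1: permit available
  void unpark() {
    pthread_mutex_lock(&mu);
    const int s = counter;          // old value decides whether to signal
    counter = 1;                    // saturate at one permit
    pthread_mutex_unlock(&mu);
    if (s < 1) {
      pthread_cond_signal(&cv);     // a thread may be parked; wake it
    }
  }
};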

@ -25,8 +25,6 @@
#ifndef OS_BSD_VM_OS_BSD_INLINE_HPP
#define OS_BSD_VM_OS_BSD_INLINE_HPP
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
// System includes
@ -40,18 +38,6 @@ inline void* os::thread_local_storage_at(int index) {
return pthread_getspecific((pthread_key_t)index);
}
inline const char* os::file_separator() {
return "/";
}
inline const char* os::line_separator() {
return "\n";
}
inline const char* os::path_separator() {
return ":";
}
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"

@ -36,6 +36,7 @@
#include "memory/filemap.hpp"
#include "mutex_linux.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@ -1552,9 +1553,6 @@ void os::die() {
::abort();
}
// unused on linux for now.
void os::set_error_file(const char *logfile) {}
// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c
@ -2344,6 +2342,7 @@ void os::jvm_path(char *buf, jint buflen) {
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
assert(len < buflen, "Ran out of buffer room");
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
@ -2364,7 +2363,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
}
strcpy(saved_jvm_path, buf);
strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@ -3790,12 +3789,10 @@ bool os::dont_yield() {
return DontYieldALot;
}
void os::yield() {
void os::naked_yield() {
sched_yield();
}
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@ -5456,22 +5453,12 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
return abstime;
}
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
void os::PlatformEvent::park() { // AKA "down()"
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
// TODO: assert that _Assoc != NULL or _Assoc == Self
assert(_nParked == 0, "invariant");
int v;
for (;;) {
v = _Event;
@ -5571,8 +5558,7 @@ void os::PlatformEvent::unpark() {
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -5800,10 +5786,9 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status;
status = pthread_mutex_lock(_mutex);
int status = pthread_mutex_lock(_mutex);
assert(status == 0, "invariant");
s = _counter;
const int s = _counter;
_counter = 1;
if (s < 1) {
// thread might be parked

@ -315,7 +315,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
int fired() { return _Event; }
void park();
void unpark();
int TryPark();
int park(jlong millis); // relative timed-wait only
void SetAssociation(Thread * a) { _Assoc = a; }
};

@ -25,8 +25,6 @@
#ifndef OS_LINUX_VM_OS_LINUX_INLINE_HPP
#define OS_LINUX_VM_OS_LINUX_INLINE_HPP
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
// System includes
@ -40,18 +38,6 @@ inline void* os::thread_local_storage_at(int index) {
return pthread_getspecific((pthread_key_t)index);
}
inline const char* os::file_separator() {
return "/";
}
inline const char* os::line_separator() {
return "\n";
}
inline const char* os::path_separator() {
return ":";
}
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,8 +22,16 @@
*
*/
#include "runtime/os.hpp"
#ifndef OS_POSIX_VM_OS_POSIX_HPP
#define OS_POSIX_VM_OS_POSIX_HPP
// File conventions
static const char* file_separator() { return "/"; }
static const char* line_separator() { return "\n"; }
static const char* path_separator() { return ":"; }
class Posix {
friend class os;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"

@ -37,6 +37,7 @@
#include "mutex_solaris.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_solaris.hpp"
#include "os_solaris.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
@ -1542,9 +1543,6 @@ void os::die() {
::abort(); // dump core (for debugging)
}
// unused
void os::set_error_file(const char *logfile) {}
// DLL functions
const char* os::dll_file_extension() { return ".so"; }
@ -2184,6 +2182,7 @@ void os::jvm_path(char *buf, jint buflen) {
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
assert(len < buflen, "Ran out of buffer space");
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
@ -2202,7 +2201,7 @@ void os::jvm_path(char *buf, jint buflen) {
}
}
strcpy(saved_jvm_path, buf);
strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
@ -3172,20 +3171,14 @@ bool os::dont_yield() {
}
}
// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.
void os::yield() {
// Yields to all threads with same or greater priority
os::sleep(Thread::current(), 0, false);
}
// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
void os::naked_yield() {
thr_yield();
}
// Interface for setting lwp priorities. If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
@ -5439,20 +5432,11 @@ static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
return abstime;
}
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
int os::PlatformEvent::TryPark() {
for (;;) {
const int v = _Event;
guarantee((v == 0) || (v == 1), "invariant");
if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
}
}
void os::PlatformEvent::park() { // AKA: down()
// Invariant: Only the thread associated with the Event/PlatformEvent
// may call park().
assert(_nParked == 0, "invariant");
int v;
for (;;) {
v = _Event;
@ -5539,8 +5523,7 @@ void os::PlatformEvent::unpark() {
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means
@ -5744,10 +5727,9 @@ void Parker::park(bool isAbsolute, jlong time) {
}
void Parker::unpark() {
int s, status;
status = os::Solaris::mutex_lock(_mutex);
int status = os::Solaris::mutex_lock(_mutex);
assert(status == 0, "invariant");
s = _counter;
const int s = _counter;
_counter = 1;
status = os::Solaris::mutex_unlock(_mutex);
assert(status == 0, "invariant");

@ -332,7 +332,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
int fired() { return _Event; }
void park();
int park(jlong millis);
int TryPark();
void unpark();
};

@ -25,8 +25,6 @@
#ifndef OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
#define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
// System includes
@ -39,10 +37,6 @@
#include <netdb.h>
#include <setjmp.h>
inline const char* os::file_separator() { return "/"; }
inline const char* os::line_separator() { return "\n"; }
inline const char* os::path_separator() { return ":"; }
// File names are case-sensitive on windows only
inline int os::file_name_strcmp(const char* s1, const char* s2) {
return strcmp(s1, s2);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "prims/jvm.h"
#include "runtime/arguments.hpp"
#include "runtime/os.hpp"
#include "decoder_windows.hpp"
WindowsDecoder::WindowsDecoder() {

@ -40,6 +40,7 @@
#include "mutex_windows.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
@ -1823,7 +1824,9 @@ void os::jvm_path(char *buf, jint buflen) {
// looks like jvm.dll is installed there (append a fake suffix
// hotspot/jvm.dll).
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != NULL && java_home_var[0] != 0) {
if (java_home_var != NULL && java_home_var[0] != 0 &&
strlen(java_home_var) < (size_t)buflen) {
strncpy(buf, java_home_var, buflen);
// determine if this is a legacy image or modules image
@ -1842,7 +1845,7 @@ void os::jvm_path(char *buf, jint buflen) {
if (buf[0] == '\0') {
GetModuleFileName(vm_lib_handle, buf, buflen);
}
strcpy(saved_jvm_path, buf);
strncpy(saved_jvm_path, buf, MAX_PATH);
}
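Note: both strncpy fixes above guard the same hazard: strncpy() neither checks the destination size nor NUL-terminates when the source fills the buffer. A minimal sketch of the safe pattern (hypothetical helper, not from this commit):
#include <cstring>
// Copy src into dst[cap] only when it provably fits, keeping the NUL.
static bool copy_if_fits(char* dst, size_t cap, const char* src) {
  if (strlen(src) >= cap) return false;  // would truncate and drop the NUL
  strncpy(dst, src, cap);                // fits, so the terminator is copied
  return true;
}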
@ -2290,17 +2293,6 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
return EXCEPTION_CONTINUE_SEARCH;
}
// Fatal error reporting is single threaded so we can make this a
// static and preallocated. If it's more than MAX_PATH silently ignore
// it.
static char saved_error_file[MAX_PATH] = {0};
void os::set_error_file(const char *logfile) {
if (strlen(logfile) <= MAX_PATH) {
strncpy(saved_error_file, logfile, MAX_PATH);
}
}
static inline void report_error(Thread* t, DWORD exception_code,
address addr, void* siginfo, void* context) {
VMError err(t, exception_code, addr, siginfo, context);
@ -3514,19 +3506,16 @@ void os::infinite_sleep() {
typedef BOOL (WINAPI * STTSignature)(void);
os::YieldResult os::NakedYield() {
void os::naked_yield() {
// Use either SwitchToThread() or Sleep(0)
// Consider passing back the return value from SwitchToThread().
if (os::Kernel32Dll::SwitchToThreadAvailable()) {
return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY;
SwitchToThread();
} else {
Sleep(0);
}
return os::YIELD_UNKNOWN;
}
void os::yield() { os::NakedYield(); }
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven. It would be better
// if we dynamically adjusted relative priorities.
@ -4875,8 +4864,7 @@ void os::PlatformEvent::unpark() {
// 1 :=> 1
// -1 :=> either 0 or 1; must signal target thread
// That is, we can safely transition _Event from -1 to either
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
// unpark() calls.
// 0 or 1.
// See also: "Semaphores in Plan 9" by Mullender & Cox
//
// Note: Forcing a transition from "-1" to "1" on an unpark() means

@ -29,6 +29,11 @@
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }
// File conventions
static const char* file_separator() { return "\\"; }
static const char* line_separator() { return "\r\n"; }
static const char* path_separator() { return ";"; }
class win32 {
friend class os;

@ -25,13 +25,8 @@
#ifndef OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
#define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
inline const char* os::file_separator() { return "\\"; }
inline const char* os::line_separator() { return "\r\n"; }
inline const char* os::path_separator() { return ";"; }
inline const char* os::dll_file_extension() { return ".dll"; }
inline const int os::default_file_open_flags() { return O_BINARY | O_NOINHERIT;}

@ -1997,7 +1997,13 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
&& !target->can_be_statically_bound()) {
// Find a vtable index if one is available
vtable_index = target->resolve_vtable_index(calling_klass, callee_holder);
// For arrays, callee_holder is Object. Resolving the call with
// Object would allow an illegal call to finalize() on an
// array. We use holder instead: illegal calls to finalize() won't
// be compiled as vtable calls (IC call resolution will catch the
// illegal call) and the few legal calls on array types won't be
// either.
vtable_index = target->resolve_vtable_index(calling_klass, holder);
}
#endif

@ -1050,6 +1050,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
// Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
n_copy->set_data(cast_from_oop<intx>(mirror()));
}

@ -185,6 +185,10 @@ private:
}
}
void ensure_metadata_alive(ciMetadata* m) {
_factory->ensure_metadata_alive(m);
}
ciInstance* get_instance(oop o) {
if (o == NULL) return NULL;
return get_object(o)->as_instance();

@ -43,6 +43,7 @@ class ciKlass : public ciType {
friend class ciMethod;
friend class ciMethodData;
friend class ciObjArrayKlass;
friend class ciReceiverTypeData;
private:
ciSymbol* _name;

@ -170,6 +170,7 @@ void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
Klass* k = data->as_ReceiverTypeData()->receiver(row);
if (k != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(k);
CURRENT_ENV->ensure_metadata_alive(klass);
set_receiver(row, klass);
}
}
@ -191,6 +192,7 @@ void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
Method* m = data->as_SpeculativeTrapData()->method();
ciMethod* ci_m = CURRENT_ENV->get_method(m);
CURRENT_ENV->ensure_metadata_alive(ci_m);
set_method(ci_m);
}

@ -70,6 +70,7 @@ protected:
Klass* v = TypeEntries::valid_klass(k);
if (v != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(v);
CURRENT_ENV->ensure_metadata_alive(klass);
return with_status(klass, k);
}
return with_status(NULL, k);

@ -46,6 +46,9 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/fieldType.hpp"
#if INCLUDE_ALL_GCS
# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// ciObjectFactory
//
@ -374,6 +377,37 @@ ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
return NULL;
}
// ------------------------------------------------------------------
// ciObjectFactory::ensure_metadata_alive
//
// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
// This is primarily useful for metadata which is considered as weak roots
// by the GC but needs to be strong roots if reachable from a current compilation.
//
void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
ASSERT_IN_VM; // We're handling raw oops here.
#if INCLUDE_ALL_GCS
if (!UseG1GC) {
return;
}
Klass* metadata_owner_klass;
if (m->is_klass()) {
metadata_owner_klass = m->as_klass()->get_Klass();
} else if (m->is_method()) {
metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
} else {
fatal("Not implemented for other types of metadata");
}
oop metadata_holder = metadata_owner_klass->klass_holder();
if (metadata_holder != NULL) {
G1SATBCardTableModRefBS::enqueue(metadata_holder);
}
#endif
}
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method
//

@ -75,6 +75,8 @@ private:
ciObject* create_new_object(oop o);
ciMetadata* create_new_object(Metadata* o);
void ensure_metadata_alive(ciMetadata* m);
static bool is_equal(NonPermObject* p, oop key) {
return p->object()->get_oop() == key;
}

@ -919,7 +919,7 @@ void ClassFileParser::parse_field_attributes(u2 attributes_count,
"Wrong size %u for field's Signature attribute in class file %s",
attribute_length, CHECK);
}
generic_signature_index = cfs->get_u2(CHECK);
generic_signature_index = parse_generic_signature_attribute(CHECK);
} else if (attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
if (runtime_visible_annotations != NULL) {
classfile_parse_error(
@ -2306,8 +2306,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
"Invalid Signature attribute length %u in class file %s",
method_attribute_length, CHECK_(nullHandle));
}
cfs->guarantee_more(2, CHECK_(nullHandle)); // generic_signature_index
generic_signature_index = cfs->get_u2_fast();
generic_signature_index = parse_generic_signature_attribute(CHECK_(nullHandle));
} else if (method_attribute_name == vmSymbols::tag_runtime_visible_annotations()) {
if (runtime_visible_annotations != NULL) {
classfile_parse_error(
@ -2644,6 +2643,17 @@ intArray* ClassFileParser::sort_methods(Array<Method*>* methods) {
return method_ordering;
}
// Parse generic_signature attribute for methods and fields
u2 ClassFileParser::parse_generic_signature_attribute(TRAPS) {
ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_0); // generic_signature_index
u2 generic_signature_index = cfs->get_u2_fast();
check_property(
valid_symbol_at(generic_signature_index),
"Invalid Signature attribute at constant pool index %u in class file %s",
generic_signature_index, CHECK_0);
return generic_signature_index;
}
void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
ClassFileStream* cfs = stream();
@ -2798,17 +2808,19 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
ClassFileStream* cfs = stream();
u1* current_start = cfs->current();
cfs->guarantee_more(2, CHECK); // length
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
attribute_byte_length,
CHECK);
cfs->guarantee_more(attribute_byte_length, CHECK);
int attribute_array_length = cfs->get_u2_fast();
guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
"Short length on BootstrapMethods in class file %s",
CHECK);
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
attribute_byte_length,
CHECK);
// The attribute contains a counted array of counted tuples of shorts,
// representing bootstrap specifiers:
@ -4590,8 +4602,9 @@ void ClassFileParser::check_final_method_override(instanceKlassHandle this_klass
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_VerifyError(),
"class %s overrides final method %s.%s",
"class %s overrides final method %s.%s%s",
this_klass->external_name(),
super_m->method_holder()->external_name(),
name->as_C_string(),
signature->as_C_string()
);

@ -266,6 +266,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
u1* parse_stackmap_table(u4 code_attribute_length, TRAPS);
// Classfile attribute parsing
u2 parse_generic_signature_attribute(TRAPS);
void parse_classfile_sourcefile_attribute(TRAPS);
void parse_classfile_source_debug_extension_attribute(int length, TRAPS);
u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,

@ -52,6 +52,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/management.hpp"
@ -59,22 +60,6 @@
#include "utilities/events.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// Entry points in zip.dll for loading zip/jar file entries

@ -332,6 +332,27 @@ void ClassLoaderData::unload() {
}
}
#ifdef ASSERT
class AllAliveClosure : public OopClosure {
BoolObjectClosure* _is_alive_closure;
bool _found_dead;
public:
AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
template <typename T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!_is_alive_closure->do_object_b(obj)) {
_found_dead = true;
}
}
}
void do_oop(oop* p) { do_oop_work<oop>(p); }
void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
bool found_dead() { return _found_dead; }
};
#endif
oop ClassLoaderData::keep_alive_object() const {
assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
return is_anonymous() ? _klasses->java_mirror() : class_loader();
@ -341,7 +362,15 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
|| is_alive_closure->do_object_b(keep_alive_object());
assert(!alive || claimed(), "must be claimed");
#ifdef ASSERT
if (alive) {
AllAliveClosure all_alive_closure(is_alive_closure);
KlassToOopClosure klass_closure(&all_alive_closure);
const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
}
#endif
return alive;
}
@ -620,9 +649,9 @@ void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
if (ClassUnloading) {
ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
keep_alive_oops_do(f, klass_closure, must_claim);
} else {
ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
oops_do(f, klass_closure, must_claim);
}
}
@ -632,6 +661,27 @@ void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
}
}
void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->_next) {
CLDClosure* closure = cld->keep_alive() ? strong : weak;
if (closure != NULL) {
closure->do_cld(cld);
}
}
}
void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
roots_cld_do(cl, NULL);
}
void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
if (ClassUnloading) {
keep_alive_cld_do(cl);
} else {
cld_do(cl);
}
}
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->classes_do(klass_closure);
@ -689,6 +739,16 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
return array;
}
bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
return true;
}
}
return false;
}
#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@ -809,6 +869,60 @@ Metaspace* ClassLoaderData::rw_metaspace() {
return _rw_metaspace;
}
ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
: _next_klass(NULL) {
ClassLoaderData* cld = ClassLoaderDataGraph::_head;
Klass* klass = NULL;
// Find the first klass in the CLDG.
while (cld != NULL) {
klass = cld->_klasses;
if (klass != NULL) {
_next_klass = klass;
return;
}
cld = cld->next();
}
}
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
Klass* next = klass->next_link();
if (next != NULL) {
return next;
}
// No more klasses in the current CLD. Time to find a new CLD.
ClassLoaderData* cld = klass->class_loader_data();
while (next == NULL) {
cld = cld->next();
if (cld == NULL) {
break;
}
next = cld->_klasses;
}
return next;
}
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
Klass* head = (Klass*)_next_klass;
while (head != NULL) {
Klass* next = next_klass_in_cldg(head);
Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
if (old_head == head) {
return head; // Won the CAS.
}
head = old_head;
}
// Nothing more for the iterator to hand out.
assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
return NULL;
}
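Note: next_klass() above is a lock-free claim loop: each worker CASes the shared cursor from the current head to its successor and owns the head on success. A self-contained toy version of the pattern (generic cursor and successor function, not HotSpot code):
#include <atomic>
// Claim one element from a shared cursor; returns nullptr when drained.
template <typename T>
T* claim_next(std::atomic<T*>& cursor, T* (*successor)(T*)) {
  T* head = cursor.load();
  while (head != nullptr) {
    T* next = successor(head);
    if (cursor.compare_exchange_weak(head, next)) {
      return head;                 // won the CAS, this element is ours
    }
    // on failure, head was reloaded with the current cursor value; retry
  }
  return nullptr;                  // another worker took the last element
}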
ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
_data = ClassLoaderDataGraph::_head;

@ -31,7 +31,6 @@
#include "memory/metaspaceCounters.hpp"
#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"
#if INCLUDE_TRACE
# include "utilities/ticks.hpp"
#endif
@ -59,6 +58,7 @@ class Metadebug;
class ClassLoaderDataGraph : public AllStatic {
friend class ClassLoaderData;
friend class ClassLoaderDataGraphMetaspaceIterator;
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class VMStructs;
private:
// All CLDs (except the null CLD) can be reached by walking _head->_next->...
@ -75,10 +75,16 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
static void purge();
static void clear_claimed_marks();
// oops do
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
// cld do
static void cld_do(CLDClosure* cl);
static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
static void keep_alive_cld_do(CLDClosure* cl);
static void always_strong_cld_do(CLDClosure* cl);
// klass do
static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void methods_do(void f(Method*));
@ -104,6 +110,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void dump() { dump_on(tty); }
static void verify();
static bool unload_list_contains(const void* x);
#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif
@ -136,6 +143,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
};
friend class ClassLoaderDataGraph;
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphMetaspaceIterator;
friend class MetaDataFactory;
friend class Method;
@ -195,7 +203,6 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void unload();
bool keep_alive() const { return _keep_alive; }
bool is_alive(BoolObjectClosure* is_alive_closure) const;
void classes_do(void f(Klass*));
void loaded_classes_do(KlassClosure* klass_closure);
void classes_do(void f(InstanceKlass*));
@ -208,6 +215,9 @@ class ClassLoaderData : public CHeapObj<mtClass> {
MetaWord* allocate(size_t size);
public:
bool is_alive(BoolObjectClosure* is_alive_closure) const;
// Accessors
Metaspace* metaspace_or_null() const { return _metaspace; }
@ -293,6 +303,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void initialize_shared_metaspaces();
};
// An iterator that distributes Klasses to parallel worker threads.
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
volatile Klass* _next_klass;
public:
ClassLoaderDataGraphKlassIteratorAtomic();
Klass* next_klass();
private:
static Klass* next_klass_in_cldg(Klass* klass);
};
class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
ClassLoaderData* _data;
public:

@ -199,6 +199,26 @@ bool Dictionary::do_unloading() {
return class_was_unloaded;
}
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
// Skip the strong roots probe marking if the closures are the same.
if (strong == weak) {
oops_do(strong);
return;
}
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry *probe = bucket(index);
probe != NULL;
probe = probe->next()) {
Klass* e = probe->klass();
ClassLoaderData* loader_data = probe->loader_data();
if (is_strongly_reachable(loader_data, e)) {
probe->set_strongly_reachable();
}
}
}
_pd_cache_table->roots_oops_do(strong, weak);
}
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
@ -490,6 +510,23 @@ void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
}
}
void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
if (probe->is_strongly_reachable()) {
probe->reset_strongly_reachable();
probe->oops_do(strong);
} else {
if (weak != NULL) {
probe->oops_do(weak);
}
}
}
}
}
uint ProtectionDomainCacheTable::bucket_size() {
return sizeof(ProtectionDomainCacheEntry);
}

@ -89,6 +89,7 @@ public:
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* blk);
void roots_oops_do(OopClosure* strong, OopClosure* weak);
void always_strong_classes_do(KlassClosure* closure);
@ -218,6 +219,7 @@ public:
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* f);
void roots_oops_do(OopClosure* strong, OopClosure* weak);
static uint bucket_size();

@ -618,6 +618,8 @@ void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader,
assert(comp_mirror.not_null(), "must have a mirror");
// Two-way link between the array klass and its component mirror:
// (array_klass) k -> mirror -> component_mirror -> array_klass -> k
set_component_mirror(mirror(), comp_mirror());
ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
set_array_klass(comp_mirror(), k());
} else {
@ -679,6 +681,16 @@ void java_lang_Class::set_protection_domain(oop java_class, oop pd) {
java_class->obj_field_put(_protection_domain_offset, pd);
}
void java_lang_Class::set_component_mirror(oop java_class, oop comp_mirror) {
if (_component_mirror_offset != 0) {
java_class->obj_field_put(_component_mirror_offset, comp_mirror);
}
}
oop java_lang_Class::component_mirror(oop java_class) {
assert(_component_mirror_offset != 0, "must be set");
return java_class->obj_field(_component_mirror_offset);
}
oop java_lang_Class::init_lock(oop java_class) {
assert(_init_lock_offset != 0, "must be set");
return java_class->obj_field(_init_lock_offset);
@ -875,6 +887,10 @@ void java_lang_Class::compute_offsets() {
klass_oop, vmSymbols::classLoader_name(),
vmSymbols::classloader_signature());
compute_optional_offset(_component_mirror_offset,
klass_oop, vmSymbols::componentType_name(),
vmSymbols::class_signature());
CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
@ -3097,6 +3113,7 @@ int java_lang_Class::_oop_size_offset;
int java_lang_Class::_static_oop_field_count_offset;
int java_lang_Class::_class_loader_offset;
int java_lang_Class::_protection_domain_offset;
int java_lang_Class::_component_mirror_offset;
int java_lang_Class::_init_lock_offset;
int java_lang_Class::_signers_offset;
GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;

@ -241,6 +241,7 @@ class java_lang_Class : AllStatic {
static int _init_lock_offset;
static int _signers_offset;
static int _class_loader_offset;
static int _component_mirror_offset;
static bool offsets_computed;
static int classRedefinedCount_offset;
@ -250,6 +251,7 @@ class java_lang_Class : AllStatic {
static void set_init_lock(oop java_class, oop init_lock);
static void set_protection_domain(oop java_class, oop protection_domain);
static void set_class_loader(oop java_class, oop class_loader);
static void set_component_mirror(oop java_class, oop comp_mirror);
static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
public:
static void compute_offsets();
@ -291,6 +293,7 @@ class java_lang_Class : AllStatic {
// Support for embedded per-class oops
static oop protection_domain(oop java_class);
static oop init_lock(oop java_class);
static oop component_mirror(oop java_class);
static objArrayOop signers(oop java_class);
static void set_signers(oop java_class, objArrayOop signers);

@ -47,8 +47,11 @@ MetadataOnStackMark::MetadataOnStackMark() {
if (_marked_objects == NULL) {
_marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
}
Threads::metadata_do(Metadata::mark_on_stack);
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
if (JvmtiExport::has_redefined_a_class()) {
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
}
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -134,6 +134,7 @@ void StackMapTable::check_jump_target(
}
// check if uninitialized objects exist on backward branches
check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
frame->verifier()->update_furthest_jump(target);
}
void StackMapTable::check_new_object(

@ -37,6 +37,7 @@
#include "runtime/mutexLocker.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#endif
@ -157,11 +158,26 @@ oop StringTable::lookup(Symbol* symbol) {
return lookup(chars, length);
}
// Tell the GC that this string was looked up in the StringTable.
static void ensure_string_alive(oop string) {
// A lookup in the StringTable could return an object that was previously
// considered dead. The SATB part of G1 needs to get notified about this
// potential resurrection, otherwise the marking might not find the object.
#if INCLUDE_ALL_GCS
if (UseG1GC && string != NULL) {
G1SATBCardTableModRefBS::enqueue(string);
}
#endif
}
oop StringTable::lookup(jchar* name, int len) {
unsigned int hash = hash_string(name, len);
int index = the_table()->hash_to_index(hash);
return the_table()->lookup(index, name, len, hash);
oop string = the_table()->lookup(index, name, len, hash);
ensure_string_alive(string);
return string;
}
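Note: ensure_string_alive() above, like ciObjectFactory::ensure_metadata_alive() earlier in this commit, is a SATB "resurrection" barrier: anything a racy lookup hands out gets logged so concurrent marking treats it as live. A toy model of the idea, with a plain vector standing in for G1's SATB queue (not HotSpot code):
#include <vector>
struct SatbQueueSketch {
  std::vector<const void*> queue;             // stand-in for G1's SATB queue
  void enqueue(const void* obj) {
    if (obj != nullptr) {
      queue.push_back(obj);                   // marker will treat obj as live
    }
  }
};
// Hypothetical lookup wrapper: publish the object before it escapes.
const void* lookup_and_publish(SatbQueueSketch& satb, const void* found) {
  satb.enqueue(found);
  return found;
}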
@ -172,7 +188,10 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
oop found_string = the_table()->lookup(index, name, len, hashValue);
// Found
if (found_string != NULL) return found_string;
if (found_string != NULL) {
ensure_string_alive(found_string);
return found_string;
}
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
assert(!Universe::heap()->is_in_reserved(name),
@ -197,11 +216,17 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
// Grab the StringTable_lock before getting the_table() because it could
// change at safepoint.
MutexLocker ml(StringTable_lock, THREAD);
oop added_or_found;
{
MutexLocker ml(StringTable_lock, THREAD);
// Otherwise, add the symbol to the table
added_or_found = the_table()->basic_add(index, string, name, len,
hashValue, CHECK_NULL);
}
// Otherwise, add the symbol to the table
return the_table()->basic_add(index, string, name, len,
hashValue, CHECK_NULL);
ensure_string_alive(added_or_found);
return added_or_found;
}
oop StringTable::intern(Symbol* symbol, TRAPS) {

@ -1612,13 +1612,7 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
// system dictionary and follows the remaining classes' contents.
void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
blk->do_oop(&_java_system_loader);
blk->do_oop(&_system_loader_lock_obj);
dictionary()->always_strong_oops_do(blk);
// Visit extra methods
invoke_method_table()->oops_do(blk);
roots_oops_do(blk, NULL);
}
void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
@ -1685,6 +1679,17 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
return unloading_occurred;
}
void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
strong->do_oop(&_java_system_loader);
strong->do_oop(&_system_loader_lock_obj);
// Adjust dictionary
dictionary()->roots_oops_do(strong, weak);
// Visit extra methods
invoke_method_table()->oops_do(strong);
}
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_system_loader_lock_obj);

@ -330,6 +330,7 @@ public:
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
static void roots_oops_do(OopClosure* strong, OopClosure* weak);
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }

@ -633,6 +633,9 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
bool no_control_flow = false; // Set to true when there is no direct control
// flow from current instruction to the next
// instruction in sequence
set_furthest_jump(0);
Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) {
// Check for recursive re-verification before each bytecode.
@ -2248,6 +2251,29 @@ void ClassVerifier::verify_invoke_init(
"Bad <init> method call");
return;
}
// Make sure that this call is not jumped over.
if (bci < furthest_jump()) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from inside of a branch");
return;
}
// Make sure that this call is not done from within a TRY block because
// that can result in returning an incomplete object. Simply checking
// (bci >= start_pc) also ensures that this call is not done after a TRY
// block. That is also illegal because this call must be the first Java
// statement in the constructor.
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
for(int i = 0; i < exlength; i++) {
if (bci >= exhandlers.start_pc(i)) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
}
}
current_frame->initialize_object(type, current_type());
*this_uninit = true;
} else if (type.is_uninitialized()) {
@ -2285,16 +2311,19 @@ void ClassVerifier::verify_invoke_init(
vmSymbols::object_initializer_name(),
cp->signature_ref_at(bcs->get_index_u2()),
Klass::normal);
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
objectref_type, this, CHECK_VERIFY(this));
if (!assignable) {
verify_error(ErrorContext::bad_type(bci,
TypeOrigin::cp(new_class_index, objectref_type),
TypeOrigin::implicit(current_type())),
"Bad access to protected <init> method");
return;
// Do nothing if method is not found. Let resolution detect the error.
if (m != NULL) {
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
objectref_type, this, CHECK_VERIFY(this));
if (!assignable) {
verify_error(ErrorContext::bad_type(bci,
TypeOrigin::cp(new_class_index, objectref_type),
TypeOrigin::implicit(current_type())),
"Bad access to protected <init> method");
return;
}
}
}
}

@ -258,6 +258,9 @@ class ClassVerifier : public StackObj {
ErrorContext _error_context; // contains information about an error
// Used to detect illegal jumps over calls to super() and this() in ctors.
int32_t _furthest_jump;
void verify_method(methodHandle method, TRAPS);
char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
void verify_exception_handler_table(u4 code_length, char* code_data,
@ -403,6 +406,20 @@ class ClassVerifier : public StackObj {
Symbol* create_temporary_symbol(const char *s, int length, TRAPS);
TypeOrigin ref_ctx(const char* str, TRAPS);
// Keep track of the furthest branch done in a method to make sure that
// there are no branches over calls to super() or this() from inside of
// a constructor.
int32_t furthest_jump() { return _furthest_jump; }
void set_furthest_jump(int32_t target) {
_furthest_jump = target;
}
void update_furthest_jump(int32_t target) {
if (target > _furthest_jump) _furthest_jump = target;
}
};
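A standalone toy (hypothetical, not HotSpot code) of the furthest-jump invariant the verifier hunks above enforce: remember the largest branch target seen so far, and reject an <init> call whose bci sits below it, since such a call could have been jumped over.

#include <algorithm>
#include <cassert>

struct FurthestJump {
  int furthest = 0;
  void update(int target) { furthest = std::max(furthest, target); }  // update_furthest_jump
  bool init_call_ok(int bci) const { return bci >= furthest; }        // check in verify_invoke_init
};

int main() {
  FurthestJump fj;                 // set_furthest_jump(0) at method entry
  fj.update(42);                   // e.g. a goto/if branching forward to bci 42
  assert(!fj.init_call_ok(10));    // <init> at bci 10 can be bypassed: reject
  assert(fj.init_call_ok(42));     // at or past the join point: accept
  return 0;
}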
inline int ClassVerifier::change_sig_to_verificationType(

@ -573,6 +573,7 @@
template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \
template(classRedefinedCount_name, "classRedefinedCount") \
template(classLoader_name, "classLoader") \
template(componentType_name, "componentType") \
\
/* trace signatures */ \
TRACE_TEMPLATES(template) \

@ -331,6 +331,11 @@ void CodeCache::blobs_do(CodeBlobClosure* f) {
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
debug_only(mark_scavenge_root_nmethods());
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@ -356,6 +361,11 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
nm->set_on_scavenge_root_list();
nm->set_scavenge_root_link(_scavenge_root_nmethods);
set_scavenge_root_nmethods(nm);
@ -364,6 +374,11 @@ void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
print_trace("drop_scavenge_root", nm);
nmethod* last = NULL;
nmethod* cur = scavenge_root_nmethods();
@ -385,6 +400,11 @@ void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
void CodeCache::prune_scavenge_root_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
if (UseG1GC) {
return;
}
debug_only(mark_scavenge_root_nmethods());
nmethod* last = NULL;
@ -417,6 +437,10 @@ void CodeCache::prune_scavenge_root_nmethods() {
#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
if (UseG1GC) {
return;
}
// While we are here, verify the integrity of the list.
mark_scavenge_root_nmethods();
for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@ -457,9 +481,36 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT
void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
FOR_ALL_ALIVE_BLOBS(cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
assert(!nm->is_unloaded(), "Tautology");
nm->verify_clean_inline_caches();
nm->verify();
}
}
#endif
}
void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
// make sure that we aren't leaking icholders
int count = 0;
FOR_ALL_BLOBS(cb) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
count += nm->verify_icholder_relocations();
}
}
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
CompiledICHolder::live_count(), "must agree");
#endif
}
void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
void CodeCache::gc_epilogue() {
@ -472,41 +523,15 @@ void CodeCache::gc_epilogue() {
nm->cleanup_inline_caches();
}
DEBUG_ONLY(nm->verify());
nm->fix_oop_relocations();
DEBUG_ONLY(nm->verify_oop_relocations());
}
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
#ifdef ASSERT
// make sure that we aren't leaking icholders
int count = 0;
FOR_ALL_BLOBS(cb) {
if (cb->is_nmethod()) {
RelocIterator iter((nmethod*)cb);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
CompiledIC *ic = CompiledIC_at(iter.reloc());
if (TraceCompiledIC) {
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
ic->print();
}
assert(ic->cached_icholder() != NULL, "must be non-NULL");
count++;
}
}
}
}
}
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
CompiledICHolder::live_count(), "must agree");
#endif
verify_icholder_relocations();
}
void CodeCache::verify_oops() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;

@ -134,10 +134,6 @@ class CodeCache : AllStatic {
// to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
// to "true" iff some code got unloaded.
static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
static void oops_do(OopClosure* f) {
CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
blobs_do(&oopc);
}
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
@ -173,6 +169,9 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static void verify_clean_inline_caches();
static void verify_icholder_relocations();
// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP

@ -99,13 +99,13 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
}
{
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
_ic_call->set_destination_mt_safe(entry_point);
}
_ic_call->set_destination_mt_safe(entry_point);
}
if (is_optimized() || is_icstub) {
// Optimized call sites don't have a cache value and ICStub call
@ -159,10 +159,24 @@ address CompiledIC::stub_address() const {
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
void CompiledIC::initialize_from_iter(RelocIterator* iter) {
assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");
if (iter->type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter->virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
}
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
: _ic_call(call)
{
address ic_call = call->instruction_address();
address ic_call = _ic_call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
@ -173,15 +187,21 @@ CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
initialize_from_iter(&iter);
}
CompiledIC::CompiledIC(RelocIterator* iter)
: _ic_call(nativeCall_at(iter->addr()))
{
address ic_call = _ic_call->instruction_address();
nmethod* nm = iter->code();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
initialize_from_iter(iter);
}
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
@ -509,7 +529,7 @@ bool CompiledIC::is_icholder_entry(address entry) {
void CompiledStaticCall::set_to_clean() {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset call site
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(this);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
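The two hunks above make Patching_lock optional at a safepoint. A standalone toy of that pattern (hypothetical, using std::mutex in place of a VM mutex): when the world is stopped no mutator can race the patch, so a NULL lock skips the acquisition entirely, mirroring how MutexLockerEx treats a NULL monitor.

#include <cstdio>
#include <mutex>

struct MaybeLocker {                              // MutexLockerEx-like RAII guard
  std::mutex* m;
  explicit MaybeLocker(std::mutex* mu) : m(mu) { if (m) m->lock(); }
  ~MaybeLocker() { if (m) m->unlock(); }
};

static std::mutex patching_lock;
static bool at_safepoint = true;                  // assume the world is stopped

int main() {
  MaybeLocker ml(at_safepoint ? nullptr : &patching_lock);
  std::printf("patched %s the lock\n", at_safepoint ? "without" : "under");
  return 0;
}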

@ -150,6 +150,9 @@ class CompiledIC: public ResourceObj {
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
CompiledIC(nmethod* nm, NativeCall* ic_call);
CompiledIC(RelocIterator* iter);
void initialize_from_iter(RelocIterator* iter);
static bool is_icholder_entry(address entry);
@ -183,6 +186,7 @@ class CompiledIC: public ResourceObj {
friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
// This is used to release CompiledICHolder*s from nmethods that
// are about to be freed. The callsite might contain other stale
@ -263,6 +267,13 @@ inline CompiledIC* CompiledIC_at(Relocation* call_site) {
return c_ic;
}
inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
assert(reloc_iter->type() == relocInfo::virtual_call_type ||
reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
CompiledIC* c_ic = new CompiledIC(reloc_iter);
c_ic->verify();
return c_ic;
}
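A usage sketch of the new iterator-based accessor (grounded in the declarations above; assumes an nmethod* nm in scope inside HotSpot): constructing the CompiledIC from the live iterator avoids re-walking the relocation stream, which is what initialize_from_iter() exists for.

RelocIterator iter(nm);
while (iter.next()) {
  if (iter.type() == relocInfo::virtual_call_type ||
      iter.type() == relocInfo::opt_virtual_call_type) {
    CompiledIC* ic = CompiledIC_at(&iter);  // no second relocation walk
    ic->print();
  }
}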
//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in the compiled

@ -51,6 +51,8 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
unsigned char nmethod::_global_unloading_clock = 0;
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
@ -446,6 +448,7 @@ const char* nmethod::compile_kind() const {
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = in_use;
_unloading_clock = 0;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_has_unsafe_access = 0;
@ -464,7 +467,11 @@ void nmethod::init_defaults() {
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
_scavenge_root_link = NULL;
if (UseG1GC) {
_unloading_next = NULL;
} else {
_scavenge_root_link = NULL;
}
_scavenge_root_state = 0;
_compiler = NULL;
#if INCLUDE_RTM_OPT
@ -1146,7 +1153,7 @@ void nmethod::cleanup_inline_caches() {
switch(iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(iter.reloc());
CompiledIC *ic = CompiledIC_at(&iter);
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
if( cb != NULL && cb->is_nmethod() ) {
@ -1170,6 +1177,77 @@ void nmethod::cleanup_inline_caches() {
}
}
void nmethod::verify_clean_inline_caches() {
assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (!is_in_use()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
ResourceMark rm;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch(iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
assert(ic->is_clean(), "IC should be clean");
}
}
break;
}
case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
if( cb != NULL && cb->is_nmethod() ) {
nmethod* nm = (nmethod*)cb;
// Verify that inline caches pointing to both zombie and not_entrant methods are clean
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
assert(csc->is_clean(), "IC should be clean");
}
}
break;
}
}
}
}
int nmethod::verify_icholder_relocations() {
int count = 0;
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
CompiledIC *ic = CompiledIC_at(&iter);
if (TraceCompiledIC) {
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
ic->print();
}
assert(ic->cached_icholder() != NULL, "must be non-NULL");
count++;
}
}
}
return count;
}
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_alive(), "Must be an alive method");
@ -1202,6 +1280,23 @@ void nmethod::inc_decompile_count() {
mdo->inc_decompile_count();
}
void nmethod::increase_unloading_clock() {
_global_unloading_clock++;
if (_global_unloading_clock == 0) {
// _nmethods are allocated with _unloading_clock == 0,
// so 0 is never used as a clock value.
_global_unloading_clock = 1;
}
}
void nmethod::set_unloading_clock(unsigned char unloading_clock) {
OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}
unsigned char nmethod::unloading_clock() {
return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
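A minimal standalone sketch (hypothetical, not HotSpot code) of the clock arithmetic above: the global epoch skips 0 on wrap-around, so a freshly allocated per-nmethod clock of 0 can never be mistaken for "already processed in this cycle".

#include <cassert>

static unsigned char global_clock = 0;

static void increase_clock() {
  if (++global_clock == 0) global_clock = 1;  // reserve 0 for "never visited"
}

int main() {
  unsigned char nm_clock = 0;                 // as set by init_defaults()
  for (int i = 0; i < 512; i++) {             // drive the clock through a wrap
    increase_clock();
    assert(nm_clock != global_clock);         // 0 never matches the epoch
  }
  return 0;
}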
void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
post_compiled_method_unload();
@ -1247,6 +1342,10 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// for later on.
CodeCache::set_needs_cache_clean(true);
}
// Unregister must be done before the state change
Universe::heap()->unregister_nmethod(this);
_state = unloaded;
// Log the unloading.
@ -1590,6 +1689,35 @@ void nmethod::post_compiled_method_unload() {
set_unload_reported();
}
static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
return;
}
} else {
ShouldNotReachHere();
}
}
}
ic->set_to_clean();
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
@ -1632,32 +1760,8 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
RelocIterator iter(this, low_boundary);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(iter.reloc());
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
continue;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
continue;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
continue;
}
} else {
ShouldNotReachHere();
}
}
}
ic->set_to_clean();
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive);
}
}
}
@ -1695,6 +1799,175 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
verify_metadata_loaders(low_boundary, is_alive);
}
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
// Ok, to lookup references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
if (cb != NULL && cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
// The nmethod has not been processed yet.
return true;
}
// Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
ic->set_to_clean();
assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
}
}
return false;
}
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}
static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
if (a_class_was_redefined) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Exception cache
clean_exception_cache(is_alive);
bool is_unloaded = false;
bool postponed = false;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
case relocInfo::oop_type:
if (!is_unloaded) {
// Unload check
oop_Relocation* r = iter.oop_reloc();
// Traverse those oops directly embedded in the code.
// Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
is_unloaded = true;
}
}
}
break;
}
}
if (is_unloaded) {
return postponed;
}
// Scopes
for (oop* p = oops_begin(); p < oops_end(); p++) {
if (*p == Universe::non_oop_word()) continue; // skip non-oops
if (can_unload(is_alive, p, unloading_occurred)) {
is_unloaded = true;
break;
}
}
if (is_unloaded) {
return postponed;
}
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
return postponed;
}
void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie(),
"should not call follow on zombie nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
}
}
}
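A toy model (hypothetical, standalone) of the two-pass protocol implemented by do_unloading_parallel() and do_unloading_parallel_postponed() above: an inline cache whose target nmethod still carries a stale clock has not been visited in this cycle, so the decision is postponed to the second pass.

#include <cstdio>

static unsigned char global_clock = 1;

struct NM { unsigned char clock; bool in_use; };

// Returns true when cleaning must be postponed; sets *cleaned when done now.
static bool clean_if_unloaded(const NM* target, bool* cleaned) {
  if (target->clock != global_clock) return true;   // target not processed yet
  if (!target->in_use) *cleaned = true;             // dead target: clean the IC
  return false;
}

int main() {
  NM callee = {0, false};                           // stale clock, not in use
  bool cleaned = false;
  bool postponed = clean_if_unloaded(&callee, &cleaned);
  std::printf("pass 1: postponed=%d\n", postponed);               // 1: revisit later
  callee.clock = global_clock;                                    // first pass visited it
  postponed = clean_if_unloaded(&callee, &cleaned);
  std::printf("pass 2: postponed=%d cleaned=%d\n", postponed, cleaned);  // 0 1
  return 0;
}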
#ifdef ASSERT
class CheckClass : AllStatic {
@ -1741,7 +2014,7 @@ void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* i
// compiled code is maintaining a link to dead metadata.
address static_call_addr = NULL;
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(iter.reloc());
CompiledIC* cic = CompiledIC_at(&iter);
if (!cic->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
@ -1793,7 +2066,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
}
} else if (iter.type() == relocInfo::virtual_call_type) {
// Check compiledIC holders associated with this nmethod
CompiledIC *ic = CompiledIC_at(iter.reloc());
CompiledIC *ic = CompiledIC_at(&iter);
if (ic->is_icholder_call()) {
CompiledICHolder* cichk = ic->cached_icholder();
f(cichk->holder_method());
@ -1911,7 +2184,7 @@ void nmethod::oops_do_marking_epilogue() {
assert(cur != NULL, "not NULL-terminated");
nmethod* next = cur->_oops_do_mark_link;
cur->_oops_do_mark_link = NULL;
cur->fix_oop_relocations();
cur->verify_oop_relocations();
NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
cur = next;
}
@ -2479,6 +2752,10 @@ public:
};
void nmethod::verify_scavenge_root_oops() {
if (UseG1GC) {
return;
}
if (!on_scavenge_root_list()) {
// Actually look inside, to verify the claim that it's clean.
DebugScavengeRoot debug_scavenge_root(this);
@ -2922,7 +3199,7 @@ void nmethod::print_calls(outputStream* st) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
VerifyMutexLocker mc(CompiledIC_lock);
CompiledIC_at(iter.reloc())->print();
CompiledIC_at(&iter)->print();
break;
}
case relocInfo::static_call_type:

@ -111,6 +111,11 @@ class nmethod : public CodeBlob {
friend class NMethodSweeper;
friend class CodeCache; // scavengable oops
private:
// GC support to help figure out if an nmethod has been
// cleaned/unloaded by the current GC.
static unsigned char _global_unloading_clock;
// Shared fields for all nmethod's
Method* _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
@ -118,7 +123,13 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
union {
// Used by G1 to chain nmethods.
nmethod* _unloading_next;
// Used by non-G1 GCs to chain nmethods.
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
@ -180,6 +191,8 @@ class nmethod : public CodeBlob {
// Protected by Patching_lock
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
@ -437,6 +450,15 @@ class nmethod : public CodeBlob {
bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
void set_unloading_next(nmethod* next) { _unloading_next = next; }
nmethod* unloading_next() { return _unloading_next; }
static unsigned char global_unloading_clock() { return _global_unloading_clock; }
static void increase_unloading_clock();
void set_unloading_clock(unsigned char unloading_clock);
unsigned char unloading_clock();
bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
void mark_for_deoptimization() { _marked_for_deoptimization = true; }
@ -552,6 +574,10 @@ public:
return (addr >= code_begin() && addr < verified_entry_point());
}
// Verify calls to dead methods have been cleaned.
void verify_clean_inline_caches();
// Verify and count cached icholder relocations.
int verify_icholder_relocations();
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
@ -577,6 +603,10 @@ public:
// GC support
void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1.
bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
// Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,21 +27,6 @@
#include "asm/codeBuffer.hpp"
#include "memory/allocation.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// The classes in this file provide a simple framework for the
// management of little pieces of machine code - or stubs -

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
@ -62,6 +63,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
// If changing the name, update the other file accordingly.
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
CompileBroker::handle_full_code_cache();
return NULL;
}
_chunk = blob->content_begin();

@ -1048,7 +1048,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
}
// Let go of Threads_lock before yielding
os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
return compiler_thread;
}
@ -2123,6 +2123,7 @@ void CompileBroker::set_last_compile(CompilerThread* thread, methodHandle method
ResourceMark rm;
char* method_name = method->name()->as_C_string();
strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
_last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
char current_method[CompilerCounters::cmname_buffer_length];
size_t maxLen = CompilerCounters::cmname_buffer_length;
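A minimal standalone illustration of the one-line fix above: strncpy() leaves the buffer without a terminator when the source string fills it, so the last byte must be forced to '\0' by hand.

#include <cstdio>
#include <cstring>

int main() {
  char buf[8];
  std::strncpy(buf, "0123456789", sizeof(buf));  // truncates, no terminator
  buf[sizeof(buf) - 1] = '\0';                   // the added safety line
  std::printf("%s\n", buf);                      // prints "0123456"
  return 0;
}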

@ -30,6 +30,7 @@
#include "memory/cardTableModRefBS.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef TARGET_ARCH_x86

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,21 +27,6 @@
#include "asm/codeBuffer.hpp"
#include "runtime/globals.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
class decode_env;

@ -29,6 +29,7 @@
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
template <>

@ -1,477 +0,0 @@
/*
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "runtime/timer.hpp"
// This class keeps statistical information and computes the
// size of the heap for the concurrent mark sweep collector.
//
// Cost for garbage collector include cost for
// minor collection
// concurrent collection
// stop-the-world component
// concurrent component
// major compacting collection
// uses decaying cost
// Forward decls
class elapsedTimer;
class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
friend class CMSGCAdaptivePolicyCounters;
friend class CMSCollector;
private:
// Total number of processors available
int _processor_count;
// Number of processors used by the concurrent phases of GC
// This number is assumed to be the same for all concurrent
// phases.
int _concurrent_processor_count;
// Time that the mutators run exclusive of a particular
// phase. For example, the time the mutators run excluding
// the time during which the cms collector runs concurrently
// with the mutators.
// Between end of most recent cms reset and start of initial mark
// This may be redundant
double _latest_cms_reset_end_to_initial_mark_start_secs;
// Between end of the most recent initial mark and start of remark
double _latest_cms_initial_mark_end_to_remark_start_secs;
// Between end of most recent collection and start of
// a concurrent collection
double _latest_cms_collection_end_to_collection_start_secs;
// Times of the concurrent phases of the most recent
// concurrent collection
double _latest_cms_concurrent_marking_time_secs;
double _latest_cms_concurrent_precleaning_time_secs;
double _latest_cms_concurrent_sweeping_time_secs;
// Between end of most recent STW MSC and start of next STW MSC
double _latest_cms_msc_end_to_msc_start_time_secs;
// Between end of most recent MS and start of next MS
// This does not include any time spent during a concurrent
// collection.
double _latest_cms_ms_end_to_ms_start;
// Between start and end of the initial mark of the most recent
// concurrent collection.
double _latest_cms_initial_mark_start_to_end_time_secs;
// Between start and end of the remark phase of the most recent
// concurrent collection
double _latest_cms_remark_start_to_end_time_secs;
// Between start and end of the most recent MS STW marking phase
double _latest_cms_ms_marking_start_to_end_time_secs;
// Pause time timers
static elapsedTimer _STW_timer;
// Concurrent collection timer. Used for total of all concurrent phases
// during 1 collection cycle.
static elapsedTimer _concurrent_timer;
// When the size of the generation is changed, the size
// of the change will be rounded up or down (depending on the
// type of change) by this value.
size_t _generation_alignment;
// If this variable is true, the size of the young generation
// may be changed in order to reduce the pause(s) of the
// collection of the tenured generation in order to meet the
// pause time goal. It is common to change the size of the
// tenured generation in order to meet the pause time goal
// for the tenured generation. With the CMS collector for
// the tenured generation, the size of the young generation
// can have a significant effect on the pause times for collecting the
// tenured generation.
// This is a duplicate of a variable in PSAdaptiveSizePolicy. It
// is duplicated because it is not clear that it is general enough
// to go into AdaptiveSizePolicy.
int _change_young_gen_for_maj_pauses;
// Variable that is set to true after a collection.
bool _first_after_collection;
// Fraction of collections that are of each type
double concurrent_fraction() const;
double STW_msc_fraction() const;
double STW_ms_fraction() const;
// This call cannot be put into the epilogue as long as some
// of the counters can be set during concurrent phases.
virtual void clear_generation_free_space_flags();
void set_first_after_collection() { _first_after_collection = true; }
protected:
// Average of the sum of the concurrent times for
// one collection in seconds.
AdaptiveWeightedAverage* _avg_concurrent_time;
// Average time between concurrent collections in seconds.
AdaptiveWeightedAverage* _avg_concurrent_interval;
// Average cost of the concurrent part of a collection
// in seconds.
AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
// Average of the initial pause of a concurrent collection in seconds.
AdaptivePaddedAverage* _avg_initial_pause;
// Average of the remark pause of a concurrent collection in seconds.
AdaptivePaddedAverage* _avg_remark_pause;
// Average of the stop-the-world (STW) (initial mark + remark)
// times in seconds for concurrent collections.
AdaptiveWeightedAverage* _avg_cms_STW_time;
// Average of the STW collection cost for concurrent collections.
AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;
// Average of the bytes free at the start of the sweep.
AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
// Average of the bytes free at the end of the collection.
AdaptiveWeightedAverage* _avg_cms_free;
// Average of the bytes promoted between cms collections.
AdaptiveWeightedAverage* _avg_cms_promo;
// stop-the-world (STW) mark-sweep-compact
// Average of the pause time in seconds for STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_pause;
// Average of the interval in seconds between STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_interval;
// Average of the collection costs for STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_gc_cost;
// Averages for mark-sweep collections.
// The collection may have started as a background collection
// that completes in a stop-the-world (STW) collection.
// Average of the pause time in seconds for mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_pause;
// Average of the interval in seconds between mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_interval;
// Average of the collection costs for mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_gc_cost;
// These variables contain a linear fit of
// a generation size as the independent variable
// and a pause time as the dependent variable.
// For example _remark_pause_old_estimator
// is a fit of the old generation size as the
// independent variable and the remark pause
// as the dependent variable.
// remark pause time vs. cms gen size
LinearLeastSquareFit* _remark_pause_old_estimator;
// initial pause time vs. cms gen size
LinearLeastSquareFit* _initial_pause_old_estimator;
// remark pause time vs. young gen size
LinearLeastSquareFit* _remark_pause_young_estimator;
// initial pause time vs. young gen size
LinearLeastSquareFit* _initial_pause_young_estimator;
// Accessors
int processor_count() const { return _processor_count; }
int concurrent_processor_count() const { return _concurrent_processor_count; }
AdaptiveWeightedAverage* avg_concurrent_time() const {
return _avg_concurrent_time;
}
AdaptiveWeightedAverage* avg_concurrent_interval() const {
return _avg_concurrent_interval;
}
AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
return _avg_concurrent_gc_cost;
}
AdaptiveWeightedAverage* avg_cms_STW_time() const {
return _avg_cms_STW_time;
}
AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
return _avg_cms_STW_gc_cost;
}
AdaptivePaddedAverage* avg_initial_pause() const {
return _avg_initial_pause;
}
AdaptivePaddedAverage* avg_remark_pause() const {
return _avg_remark_pause;
}
AdaptiveWeightedAverage* avg_cms_free() const {
return _avg_cms_free;
}
AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
return _avg_cms_free_at_sweep;
}
AdaptiveWeightedAverage* avg_msc_pause() const {
return _avg_msc_pause;
}
AdaptiveWeightedAverage* avg_msc_interval() const {
return _avg_msc_interval;
}
AdaptiveWeightedAverage* avg_msc_gc_cost() const {
return _avg_msc_gc_cost;
}
AdaptiveWeightedAverage* avg_ms_pause() const {
return _avg_ms_pause;
}
AdaptiveWeightedAverage* avg_ms_interval() const {
return _avg_ms_interval;
}
AdaptiveWeightedAverage* avg_ms_gc_cost() const {
return _avg_ms_gc_cost;
}
LinearLeastSquareFit* remark_pause_old_estimator() {
return _remark_pause_old_estimator;
}
LinearLeastSquareFit* initial_pause_old_estimator() {
return _initial_pause_old_estimator;
}
LinearLeastSquareFit* remark_pause_young_estimator() {
return _remark_pause_young_estimator;
}
LinearLeastSquareFit* initial_pause_young_estimator() {
return _initial_pause_young_estimator;
}
// These *slope() methods return the slope
// m for the linear fit of an independent
// variable vs. a dependent variable. For
// example
// remark_pause = m * old_generation_size + c
// These may be used to determine if an
// adjustment should be made to achieve a goal.
// For example, if remark_pause_old_slope() is
// positive, a reduction of the old generation
// size has on average resulted in the reduction
// of the remark pause.
float remark_pause_old_slope() {
return _remark_pause_old_estimator->slope();
}
float initial_pause_old_slope() {
return _initial_pause_old_estimator->slope();
}
float remark_pause_young_slope() {
return _remark_pause_young_estimator->slope();
}
float initial_pause_young_slope() {
return _initial_pause_young_estimator->slope();
}
// Update estimators
void update_minor_pause_old_estimator(double minor_pause_in_ms);
// Fraction of processors used by the concurrent phases.
double concurrent_processor_fraction();
// Returns the total times for the concurrent part of the
// latest collection in seconds.
double concurrent_collection_time();
// Return the total times for the concurrent part of the
// latest collection in seconds where the times of the various
// concurrent phases are scaled by the processor fraction used
// during the phase.
double scaled_concurrent_collection_time();
// Dimensionless concurrent GC cost for all the concurrent phases.
double concurrent_collection_cost(double interval_in_seconds);
// Dimensionless GC cost
double collection_cost(double pause_in_seconds, double interval_in_seconds);
virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }
virtual double time_since_major_gc() const;
// This returns the maximum average for the concurrent, ms, and
// msc collections. This is meant to be used for the calculation
// of the decayed major gc cost and is not in general the
// average of all the different types of major collections.
virtual double major_gc_interval_average_for_decay() const;
public:
CMSAdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size,
double max_gc_minor_pause_sec,
double max_gc_pause_sec,
uint gc_cost_ratio);
// The timers for the stop-the-world phases measure a total
// stop-the-world time. The timer is started and stopped
// for each phase but is only reset after the final checkpoint.
void checkpoint_roots_initial_begin();
void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
void checkpoint_roots_final_begin();
void checkpoint_roots_final_end(GCCause::Cause gc_cause);
// Methods for gathering information about the
// concurrent marking phase of the collection.
// Records the mutator times and
// resets the concurrent timer.
void concurrent_marking_begin();
// Resets concurrent phase timer in the begin methods and
// saves the time for a phase in the end methods.
void concurrent_marking_end();
void concurrent_sweeping_begin();
void concurrent_sweeping_end();
// Similar to the above (e.g., concurrent_marking_end()) and
// is used for both the precleaning and abortable precleaning
// phases.
void concurrent_precleaning_begin();
void concurrent_precleaning_end();
// Stops the concurrent phases time. Gathers
// information and resets the timer.
void concurrent_phases_end(GCCause::Cause gc_cause,
size_t cur_eden,
size_t cur_promo);
// Methods for gathering information about STW Mark-Sweep-Compact
void msc_collection_begin();
void msc_collection_end(GCCause::Cause gc_cause);
// Methods for gathering information about Mark-Sweep done
// in the foreground.
void ms_collection_begin();
void ms_collection_end(GCCause::Cause gc_cause);
// Cost for a mark-sweep tenured gen collection done in the foreground
double ms_gc_cost() const {
return MAX2(0.0F, _avg_ms_gc_cost->average());
}
// Cost of collecting the tenured generation. Includes
// concurrent collection and STW collection costs
double cms_gc_cost() const;
// Cost of STW mark-sweep-compact tenured gen collection.
double msc_gc_cost() const {
return MAX2(0.0F, _avg_msc_gc_cost->average());
}
//
double compacting_gc_cost() const {
double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
assert(result >= 0.0, "Both minor and major costs are non-negative");
return result;
}
// Restarts the concurrent phases timer.
void concurrent_phases_resume();
// Time beginning and end of the marking phase for
// a synchronous MS collection. A MS collection
// that finishes in the foreground can have started
// in the background. These methods capture the
// completion of the marking (after the initial
// marking) that is done in the foreground.
void ms_collection_marking_begin();
void ms_collection_marking_end(GCCause::Cause gc_cause);
static elapsedTimer* concurrent_timer_ptr() {
return &_concurrent_timer;
}
AdaptiveWeightedAverage* avg_cms_promo() const {
return _avg_cms_promo;
}
int change_young_gen_for_maj_pauses() {
return _change_young_gen_for_maj_pauses;
}
void set_change_young_gen_for_maj_pauses(int v) {
_change_young_gen_for_maj_pauses = v;
}
void clear_internal_time_intervals();
// Either calculated_promo_size_in_bytes() or promo_size()
// should be deleted.
size_t promo_size() { return _promo_size; }
void set_promo_size(size_t v) { _promo_size = v; }
// Cost of GC for all types of collections.
virtual double gc_cost() const;
size_t generation_alignment() { return _generation_alignment; }
virtual void compute_eden_space_size(size_t cur_eden,
size_t max_eden_size);
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
virtual uint compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
uint tenuring_threshold,
size_t survivor_limit);
virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
size_t max_tenured_available,
size_t cur_eden);
size_t eden_decrement_aligned_down(size_t cur_eden);
size_t eden_increment_aligned_up(size_t cur_eden);
size_t adjust_eden_for_pause_time(size_t cur_eden);
size_t adjust_eden_for_throughput(size_t cur_eden);
size_t adjust_eden_for_footprint(size_t cur_eden);
size_t promo_decrement_aligned_down(size_t cur_promo);
size_t promo_increment_aligned_up(size_t cur_promo);
size_t adjust_promo_for_pause_time(size_t cur_promo);
size_t adjust_promo_for_throughput(size_t cur_promo);
size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);
// Scale down the input size by the ratio of the cost to collect the
// generation to the total GC cost.
size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);
// Return the value and clear it.
bool get_and_clear_first_after_collection();
// Printing support
virtual bool print_adaptive_size_policy_on(outputStream* st) const;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP

@ -23,9 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
@ -57,25 +56,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
if (UseParNewGC) {
if (UseAdaptiveSizePolicy) {
_generations[0] = new GenerationSpec(Generation::ASParNew,
_initial_young_size, _max_young_size);
} else {
_generations[0] = new GenerationSpec(Generation::ParNew,
_initial_young_size, _max_young_size);
}
} else {
_generations[0] = new GenerationSpec(Generation::DefNew,
_initial_young_size, _max_young_size);
}
if (UseAdaptiveSizePolicy) {
_generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
_initial_old_size, _max_old_size);
} else {
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size);
}
Generation::Name yg_name =
UseParNewGC ? Generation::ParNew : Generation::DefNew;
_generations[0] = new GenerationSpec(yg_name, _initial_young_size,
_max_young_size);
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size);
if (_generations[0] == NULL || _generations[1] == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
@ -85,14 +71,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size) {
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
_size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_minor_pause_sec,
max_gc_pause_sec,
GCTimeRatio);
_size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_pause_sec,
GCTimeRatio);
}
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@ -110,22 +94,3 @@ bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
{
return CMSIncrementalMode;
}
//
// ASConcurrentMarkSweepPolicy methods
//
void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
assert(size_policy() != NULL, "A size policy is required");
// initialize the policy counters - 2 collectors, 3 generations
if (UseParNewGC) {
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
size_policy());
}
else {
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
size_policy());
}
}

@ -47,19 +47,4 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
virtual bool has_soft_ended_eden();
};
class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
public:
// Initialize the jstat counters. This method requires a
// size policy. The size policy is expected to be created
// after the generations are fully initialized so the
// initialization of the counters need to be done post
// the initialization of the generations.
void initialize_gc_policy_counters();
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP

@ -1,303 +0,0 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "memory/resourceArea.hpp"
CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
int collectors,
int generations,
AdaptiveSizePolicy* size_policy_arg)
: GCAdaptivePolicyCounters(name_arg,
collectors,
generations,
size_policy_arg) {
if (UsePerfData) {
EXCEPTION_MARK;
ResourceMark rm;
const char* cname =
PerfDataManager::counter_name(name_space(), "cmsCapacity");
_cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Bytes, (jlong) OldSize, CHECK);
#ifdef NOT_PRODUCT
cname =
PerfDataManager::counter_name(name_space(), "initialPause");
_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "remarkPause");
_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
CHECK);
#endif
cname =
PerfDataManager::counter_name(name_space(), "avgInitialPause");
_avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_initial_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
_avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_remark_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
_avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
_avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_STW_time()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
_avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_time()->average(),
CHECK);
cname =
PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
_avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
_avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
_avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
_avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_free()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
_avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_promo()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
_avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
_avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
_msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
_avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
_avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "msGcCost");
_ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
_major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
_promoted_avg_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
_promoted_avg_dev_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) 0 , CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
_promoted_padded_avg_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
cname = PerfDataManager::counter_name(name_space(),
"changeYoungGenForMajPauses");
_change_young_gen_for_maj_pauses_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
(jlong)0, CHECK);
cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
_remark_pause_old_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
_initial_pause_old_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
cname =
PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope");
_remark_pause_young_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
cname =
PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
_initial_pause_young_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);
}
assert(size_policy()->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
}
void CMSGCAdaptivePolicyCounters::update_counters() {
if (UsePerfData) {
GCAdaptivePolicyCounters::update_counters_from_policy();
update_counters_from_policy();
}
}
void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
if (UsePerfData) {
update_counters();
update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
update_avg_promoted_avg(gc_stats);
update_avg_promoted_dev(gc_stats);
update_avg_promoted_padded_avg(gc_stats);
}
}
void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
if (UsePerfData && (cms_size_policy() != NULL)) {
GCAdaptivePolicyCounters::update_counters_from_policy();
update_major_gc_cost_counter();
update_mutator_cost_counter();
update_eden_size();
update_promo_size();
// If these updates from the last_sample() work,
// revise the update methods for these counters
// (both here and in PS).
update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());
update_avg_concurrent_time_counter();
update_avg_concurrent_interval_counter();
update_avg_concurrent_gc_cost_counter();
#ifndef PRODUCT
update_initial_pause_counter();
update_remark_pause_counter();
#endif
update_avg_initial_pause_counter();
update_avg_remark_pause_counter();
update_avg_cms_STW_time_counter();
update_avg_cms_STW_gc_cost_counter();
update_avg_cms_free_counter();
update_avg_cms_free_at_sweep_counter();
update_avg_cms_promo_counter();
update_avg_msc_pause_counter();
update_avg_msc_interval_counter();
update_msc_gc_cost_counter();
update_avg_ms_pause_counter();
update_avg_ms_interval_counter();
update_ms_gc_cost_counter();
update_avg_old_live_counter();
update_survivor_size_counters();
update_avg_survived_avg_counters();
update_avg_survived_dev_counters();
update_decrement_tenuring_threshold_for_gc_cost();
update_increment_tenuring_threshold_for_gc_cost();
update_decrement_tenuring_threshold_for_survivor_limit();
update_change_young_gen_for_maj_pauses();
update_major_collection_slope_counter();
update_remark_pause_old_slope_counter();
update_initial_pause_old_slope_counter();
update_remark_pause_young_slope_counter();
update_initial_pause_young_slope_counter();
update_decide_at_full_gc_counter();
}
}

@ -1,308 +0,0 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "runtime/perfData.hpp"
// CMSGCAdaptivePolicyCounters is a holder class for performance counters
// that track the data and decisions for the ergonomics policy for the
// concurrent mark sweep collector.
class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
friend class VMStructs;
private:
// Capacity of tenured generation recorded at the end of
// any collection.
PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity
// Average stop-the-world pause time for both initial and
// remark pauses, sampled at the end of checkpointRootsFinalWork.
PerfVariable* _avg_cms_STW_time_counter;
// Average stop-the-world (STW) GC cost for the STW pause time
// _avg_cms_STW_time_counter.
PerfVariable* _avg_cms_STW_gc_cost_counter;
#ifndef PRODUCT
// These are useful to see how the most recent values of these
// counters compare to their respective averages but
// do not control behavior.
PerfVariable* _initial_pause_counter;
PerfVariable* _remark_pause_counter;
#endif
// Average of the initial marking pause for a concurrent collection.
PerfVariable* _avg_initial_pause_counter;
// Average of the remark pause for a concurrent collection.
PerfVariable* _avg_remark_pause_counter;
// Average for the sum of all the concurrent times per collection.
PerfVariable* _avg_concurrent_time_counter;
// Average for the time between the most recent end of a
// concurrent collection and the beginning of the next
// concurrent collection.
PerfVariable* _avg_concurrent_interval_counter;
// Average of the concurrent GC costs based on _avg_concurrent_time_counter
// and _avg_concurrent_interval_counter.
PerfVariable* _avg_concurrent_gc_cost_counter;
// Average of the free space in the tenured generation at the
// end of the sweep of the tenured generation.
PerfVariable* _avg_cms_free_counter;
// Average of the free space in the tenured generation at the
// start of the sweep of the tenured generation.
PerfVariable* _avg_cms_free_at_sweep_counter;
// Average of the free space in the tenured generation
// after any resizing of the tenured generation at the end
// of a collection of the tenured generation.
PerfVariable* _avg_cms_promo_counter;
// Average of the mark-sweep-compact (MSC) pause time for a collection
// of the tenured generation.
PerfVariable* _avg_msc_pause_counter;
// Average for the time between the most recent end of an
// MSC collection and the beginning of the next MSC collection.
PerfVariable* _avg_msc_interval_counter;
// Average for the GC cost of an MSC collection based on
// _avg_msc_pause_counter and _avg_msc_interval_counter.
PerfVariable* _msc_gc_cost_counter;
// Average of the mark-sweep (MS) pause time for a collection
// of the tenured generation.
PerfVariable* _avg_ms_pause_counter;
// Average for the time between the most recent end of an
// MS collection and the beginning of the next MS collection.
PerfVariable* _avg_ms_interval_counter;
// Average for the GC cost of an MS collection based on
// _avg_ms_pause_counter and _avg_ms_interval_counter.
PerfVariable* _ms_gc_cost_counter;
// Average of the bytes promoted per minor collection.
PerfVariable* _promoted_avg_counter;
// Average of the deviation of the promoted average.
PerfVariable* _promoted_avg_dev_counter;
// Padded average of the bytes promoted per minor collection.
PerfVariable* _promoted_padded_avg_counter;
// See the description of the _change_young_gen_for_maj_pauses
// variable in cmsAdaptiveSizePolicy.hpp.
PerfVariable* _change_young_gen_for_maj_pauses_counter;
// See the descriptions of the _remark_pause_old_slope, _initial_pause_old_slope,
// etc. variables in cmsAdaptiveSizePolicy.hpp.
PerfVariable* _remark_pause_old_slope_counter;
PerfVariable* _initial_pause_old_slope_counter;
PerfVariable* _remark_pause_young_slope_counter;
PerfVariable* _initial_pause_young_slope_counter;
CMSAdaptiveSizePolicy* cms_size_policy() {
assert(_size_policy->kind() ==
AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
"Wrong size policy");
return (CMSAdaptiveSizePolicy*)_size_policy;
}
inline void update_avg_cms_STW_time_counter() {
_avg_cms_STW_time_counter->set_value(
(jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
(double) MILLIUNITS));
}
inline void update_avg_cms_STW_gc_cost_counter() {
_avg_cms_STW_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
}
inline void update_avg_initial_pause_counter() {
_avg_initial_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_initial_pause()->average() *
(double) MILLIUNITS));
}
inline void update_avg_remark_pause_counter() {
  _avg_remark_pause_counter->set_value(
    (jlong) (cms_size_policy()->avg_remark_pause()->average() *
    (double) MILLIUNITS));
}
#ifndef PRODUCT
inline void update_initial_pause_counter() {
  _initial_pause_counter->set_value(
    (jlong) (cms_size_policy()->avg_initial_pause()->last_sample() *
    (double) MILLIUNITS));
}
inline void update_remark_pause_counter() {
  _remark_pause_counter->set_value(
    (jlong) (cms_size_policy()->avg_remark_pause()->last_sample() *
    (double) MILLIUNITS));
}
#endif
inline void update_avg_concurrent_time_counter() {
_avg_concurrent_time_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
(double) MILLIUNITS));
}
inline void update_avg_concurrent_interval_counter() {
_avg_concurrent_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
(double) MILLIUNITS));
}
inline void update_avg_concurrent_gc_cost_counter() {
_avg_concurrent_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
}
inline void update_avg_cms_free_counter() {
_avg_cms_free_counter->set_value(
(jlong) cms_size_policy()->avg_cms_free()->average());
}
inline void update_avg_cms_free_at_sweep_counter() {
_avg_cms_free_at_sweep_counter->set_value(
(jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
}
inline void update_avg_cms_promo_counter() {
_avg_cms_promo_counter->set_value(
(jlong) cms_size_policy()->avg_cms_promo()->average());
}
inline void update_avg_old_live_counter() {
_avg_old_live_counter->set_value(
(jlong)(cms_size_policy()->avg_old_live()->average())
);
}
inline void update_avg_msc_pause_counter() {
_avg_msc_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_pause()->average() *
(double) MILLIUNITS));
}
inline void update_avg_msc_interval_counter() {
_avg_msc_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_interval()->average() *
(double) MILLIUNITS));
}
inline void update_msc_gc_cost_counter() {
_msc_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
}
inline void update_avg_ms_pause_counter() {
_avg_ms_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_pause()->average() *
(double) MILLIUNITS));
}
inline void update_avg_ms_interval_counter() {
_avg_ms_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_interval()->average() *
(double) MILLIUNITS));
}
inline void update_ms_gc_cost_counter() {
_ms_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
}
inline void update_major_gc_cost_counter() {
_major_gc_cost_counter->set_value(
(jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
);
}
inline void update_mutator_cost_counter() {
_mutator_cost_counter->set_value(
(jlong)(cms_size_policy()->mutator_cost() * 100.0)
);
}
inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
_promoted_avg_counter->set_value(
(jlong)(gc_stats->avg_promoted()->average())
);
}
inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
_promoted_avg_dev_counter->set_value(
(jlong)(gc_stats->avg_promoted()->deviation())
);
}
inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
_promoted_padded_avg_counter->set_value(
(jlong)(gc_stats->avg_promoted()->padded_average())
);
}
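// Editorial sketch, not HotSpot code: by construction in the sampling
// statistics (assumption: AdaptivePaddedAverage semantics), the padded
// average exported above is the plain average plus a multiple of the
// deviation,
//   padded_average() == average() + padding * deviation()
// so sizing decisions err toward reserving too much rather than too little.
// Worked example: avg 8 MB promoted, deviation 1.5 MB, padding 2 -> the
// padded counter reports 11 MB.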
inline void update_remark_pause_old_slope_counter() {
_remark_pause_old_slope_counter->set_value(
(jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
);
}
inline void update_initial_pause_old_slope_counter() {
_initial_pause_old_slope_counter->set_value(
(jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
);
}
inline void update_remark_pause_young_slope_counter() {
_remark_pause_young_slope_counter->set_value(
(jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
);
}
inline void update_initial_pause_young_slope_counter() {
_initial_pause_young_slope_counter->set_value(
(jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
);
}
inline void update_change_young_gen_for_maj_pauses() {
_change_young_gen_for_maj_pauses_counter->set_value(
cms_size_policy()->change_young_gen_for_maj_pauses());
}
public:
CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
AdaptiveSizePolicy* size_policy);
// update counters
void update_counters();
void update_counters(CMSGCStats* gc_stats);
void update_counters_from_policy();
inline void update_cms_capacity_counter(size_t size_in_bytes) {
_cms_capacity_counter->set_value(size_in_bytes);
}
virtual GCPolicyCounters::Name kind() const {
return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
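The update_* methods above export doubles through jlong counters with two fixed scalings: seconds to milliseconds (MILLIUNITS is 1000) for times, and fraction to percent (multiply by 100.0) for costs. A compilable standalone model of both conversions follows; the function names are invented for illustration and are not part of the file:
#include <cassert>
#include <cstdint>
// Seconds -> milliseconds, as in set_value((jlong)(avg * MILLIUNITS)).
int64_t export_time_ms(double seconds)  { return (int64_t)(seconds * 1000.0); }
// Fraction of total time -> percent, as in set_value((jlong)(cost * 100.0)).
int64_t export_cost_pct(double fraction) { return (int64_t)(fraction * 100.0); }
int main() {
  assert(export_time_ms(0.250) == 250);   // a 0.25 s average pause -> 250 ms
  assert(export_cost_pct(0.07) == 7);     // GC consuming 7% of total time
  return 0;
}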

@ -70,7 +70,6 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
class CompactibleFreeListSpace: public CompactibleSpace {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration;
friend class ASConcurrentMarkSweepGeneration;
friend class CMSCollector;
// Local alloc buffer for promotion into this space.
friend class CFLS_LAB;

@ -27,9 +27,8 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
@ -319,27 +318,13 @@ void CMSCollector::ref_processor_init() {
}
}
CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
AdaptiveSizePolicy* CMSCollector::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
gch->gen_policy()->size_policy();
assert(sp->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
return sp;
return gch->gen_policy()->size_policy();
}
CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
CMSGCAdaptivePolicyCounters* results =
(CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
assert(
results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
"Wrong gc policy counter kind");
return results;
}
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
const char* gen_name = "old";
@ -1573,11 +1558,11 @@ bool CMSCollector::shouldConcurrentCollect() {
}
if (MetaspaceGC::should_concurrent_collect()) {
if (Verbose && PrintGCDetails) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
}
return true;
}
return true;
}
// CMSTriggerInterval starts a CMS cycle if enough time has passed.
if (CMSTriggerInterval >= 0) {
@ -2031,11 +2016,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
"collections passed to foreground collector", _full_gcs_since_conc_gc);
}
// Sample collection interval time and reset for collection pause.
if (UseAdaptiveSizePolicy) {
size_policy()->msc_collection_begin();
}
// Temporarily widen the span of the weak reference processing to
// the entire heap.
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
@ -2111,11 +2091,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_inter_sweep_timer.reset();
_inter_sweep_timer.start();
// Sample collection pause time and reset for collection interval.
if (UseAdaptiveSizePolicy) {
size_policy()->msc_collection_end(gch->gc_cause());
}
gc_timer->register_gc_end();
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
@ -2373,26 +2348,14 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
}
break;
case Precleaning:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_begin();
}
// marking from roots in markFromRoots has been completed
preclean();
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_end();
}
assert(_collectorState == AbortablePreclean ||
_collectorState == FinalMarking,
"Collector state should have changed");
break;
case AbortablePreclean:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_phases_resume();
}
abortable_preclean();
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_end();
}
assert(_collectorState == FinalMarking, "Collector state should "
"have changed");
break;
@ -2406,23 +2369,12 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
assert(_foregroundGCShouldWait, "block post-condition");
break;
case Sweeping:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_sweeping_begin();
}
// final marking in checkpointRootsFinal has been completed
sweep(true);
assert(_collectorState == Resizing, "Collector state change "
"to Resizing must be done under the free_list_lock");
_full_gcs_since_conc_gc = 0;
// Stop the timers for adaptive size policy for the concurrent phases
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_sweeping_end();
size_policy()->concurrent_phases_end(gch->gc_cause(),
gch->prev_gen(_cmsGen)->capacity(),
_cmsGen->free());
}
case Resizing: {
// Sweeping has been completed...
// At this point the background collection has completed.
@ -2539,9 +2491,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
true, NULL, gc_id);)
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_begin();
}
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
HandleMark hm; // Discard invalid handles created during verification
@ -2633,11 +2582,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
}
}
if (UseAdaptiveSizePolicy) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_policy()->ms_collection_end(gch->gc_cause());
}
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
Universe::verify();
@ -3053,20 +2997,21 @@ void CMSCollector::verify_after_remark_work_1() {
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Get a clear set of claim bits for the strong roots processing to work with.
// Get a clear set of claim bits for the roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
// Mark from roots one level into CMS
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
NULL,
NULL); // SSS: Provide correct closure
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
NULL); // SSS: Provide correct closure
// Now mark from the roots
MarkFromRootsClosure markFromRootsClosure(this, _span,
@ -3117,22 +3062,24 @@ void CMSCollector::verify_after_remark_work_2() {
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Get a clear set of claim bits for the strong roots processing to work with.
// Get a clear set of claim bits for the roots processing to work with.
ClassLoaderDataGraph::clear_claimed_marks();
// Mark from roots one level into CMS
MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
markBitMap());
KlassToOopClosure klass_closure(&notOlder);
CLDToOopClosure cld_closure(&notOlder, true);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
NULL,
&klass_closure);
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
// Now mark from the roots
MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
@ -3319,12 +3266,10 @@ bool ConcurrentMarkSweepGeneration::is_too_full() const {
void CMSCollector::setup_cms_unloading_and_verification_state() {
const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
|| VerifyBeforeExit;
const int rso = SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
const int rso = SharedHeap::SO_AllCodeCache;
// We set the proper root for this CMS cycle here.
if (should_unload_classes()) { // Should unload classes this cycle
remove_root_scanning_option(SharedHeap::SO_AllClasses);
add_root_scanning_option(SharedHeap::SO_SystemClasses);
remove_root_scanning_option(rso); // Shrink the root set appropriately
set_verifying(should_verify); // Set verification state for this cycle
return; // Nothing else needs to be done at this time
@ -3332,8 +3277,6 @@ void CMSCollector::setup_cms_unloading_and_verification_state() {
// Not unloading classes this cycle
assert(!should_unload_classes(), "Inconsistency!");
remove_root_scanning_option(SharedHeap::SO_SystemClasses);
add_root_scanning_option(SharedHeap::SO_AllClasses);
if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
// Include symbols, strings and code cache elements to prevent their resurrection.
@ -3687,9 +3630,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_begin();
}
// Reset all the PLAB chunk arrays if necessary.
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@ -3744,15 +3684,16 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
gch->set_par_threads(0);
} else {
// The serial version.
KlassToOopClosure klass_closure(&notOlder);
CLDToOopClosure cld_closure(&notOlder, true);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&notOlder,
NULL,
&klass_closure);
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
}
}
@ -3769,9 +3710,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation.
save_sweep_limits();
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
}
verify_overflow_empty();
}
@ -3788,15 +3726,6 @@ bool CMSCollector::markFromRoots(bool asynch) {
bool res;
if (asynch) {
// Start the timers for adaptive size policy for the concurrent phases
// Do it here so that the foreground MS can use the concurrent
// timer since a foreground MS might have the sweep done concurrently
// or STW.
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_marking_begin();
}
// Weak ref discovery note: We may be discovering weak
// refs in this generation concurrent (but interleaved) with
// weak ref discovery by a younger generation collector.
@ -3814,22 +3743,12 @@ bool CMSCollector::markFromRoots(bool asynch) {
gclog_or_tty->print_cr("bailing out to foreground collection");
}
}
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_marking_end();
}
} else {
assert(SafepointSynchronize::is_at_safepoint(),
"inconsistent with asynch == false");
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_marking_begin();
}
// already have locks
res = markFromRootsWork(asynch);
_collectorState = FinalMarking;
if (UseAdaptiveSizePolicy) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_policy()->ms_collection_marking_end(gch->gc_cause());
}
}
verify_overflow_empty();
return res;
@ -4705,8 +4624,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
if (clean_survivor) { // preclean the active survivor space(s)
assert(_young_gen->kind() == Generation::DefNew ||
_young_gen->kind() == Generation::ParNew ||
_young_gen->kind() == Generation::ASParNew,
_young_gen->kind() == Generation::ParNew,
"incorrect type for cast");
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
@ -5077,10 +4995,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_final_begin();
}
ResourceMark rm;
HandleMark hm;
@ -5214,9 +5128,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
"Should be clear by end of the final marking");
assert(_ct->klass_rem_set()->mod_union_is_clear(),
"Should be clear by end of the final marking");
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_final_end(gch->gc_cause());
}
}
void CMSParInitialMarkTask::work(uint worker_id) {
@ -5228,7 +5139,6 @@ void CMSParInitialMarkTask::work(uint worker_id) {
_timer.start();
GenCollectedHeap* gch = GenCollectedHeap::heap();
Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
KlassToOopClosure klass_closure(&par_mri_cl);
// ---------- young gen roots --------------
{
@ -5244,13 +5154,17 @@ void CMSParInitialMarkTask::work(uint worker_id) {
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
&par_mri_cl,
NULL,
&klass_closure);
CLDToOopClosure cld_closure(&par_mri_cl, true);
gch->gen_process_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
NULL,
&cld_closure);
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@ -5379,13 +5293,15 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->gen_process_strong_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
&par_mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
gch->gen_process_roots(_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
assert(_collector->should_unload_classes()
|| (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
"if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@ -5440,7 +5356,7 @@ void CMSParRemarkTask::work(uint worker_id) {
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the strong roots. Hence,
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
// ---------- rescan dirty cards ------------
@ -5862,7 +5778,7 @@ void CMSCollector::do_remark_parallel() {
cms_space,
n_workers, workers, task_queues());
// Set up for parallel process_strong_roots work.
// Set up for parallel process_roots work.
gch->set_par_threads(n_workers);
// We won't be iterating over the cards in the card table updating
// the younger_gen cards, so we shouldn't call the following else
@ -5871,7 +5787,7 @@ void CMSCollector::do_remark_parallel() {
// gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
// The young gen rescan work will not be done as part of
// process_strong_roots (which currently doesn't knw how to
// process_roots (which currently doesn't know how to
// parallelize such a scan), but rather will be broken up into
// a set of parallel tasks (via the sampling that the [abortable]
// preclean phase did of EdenSpace, plus the [two] tasks of
@ -5968,13 +5884,15 @@ void CMSCollector::do_remark_non_parallel() {
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
GenCollectedHeap::StrongRootsScope srs(gch);
gch->gen_process_strong_roots(_cmsGen->level(),
true, // younger gens as roots
false, // use the local StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
&mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
gch->gen_process_roots(_cmsGen->level(),
true, // younger gens as roots
false, // use the local StrongRootsScope
SharedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
NULL,
NULL); // The dirty klasses will be handled below
assert(should_unload_classes()
|| (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
@ -6014,7 +5932,7 @@ void CMSCollector::do_remark_non_parallel() {
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the strong roots. Hence,
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
verify_work_stacks_empty();
@ -6264,15 +6182,14 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
{
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
}
// CMS doesn't use the StringTable as hard roots when class unloading is turned off.
// Need to check if we really scanned the StringTable.
if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
// Restore any preserved marks as a result of mark stack or
// work queue overflow
@ -6329,7 +6246,6 @@ void CMSCollector::sweep(bool asynch) {
_inter_sweep_timer.stop();
_inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
assert(!_intra_sweep_timer.is_active(), "Should not be active");
_intra_sweep_timer.reset();
@ -6454,17 +6370,6 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
}
}
CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
gch->gen_policy()->size_policy();
assert(sp->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
return sp;
}
void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
@ -6540,9 +6445,6 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSAdaptiveSizePolicy* sp = size_policy();
AdaptiveSizePolicyOutput(sp, gch->total_collections());
if (asynch) {
CMSTokenSyncWithLocks ts(true, bitMapLock());
@ -6597,7 +6499,7 @@ void CMSCollector::reset(bool asynch) {
// Because only the full (i.e., concurrent mode failure) collections
// are being measured for gc overhead limits, clean the "near" flag
// and count.
sp->reset_gc_overhead_limit_count();
size_policy()->reset_gc_overhead_limit_count();
_collectorState = Idling;
} else {
// already have the lock
@ -7064,7 +6966,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7225,7 +7126,6 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7298,7 +7198,6 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7457,7 +7356,6 @@ void MarkFromRootsClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -8099,7 +7997,6 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -8780,7 +8677,6 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -9327,172 +9223,6 @@ bool CMSCollector::no_preserved_marks() const {
}
#endif
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
{
GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
CMSAdaptiveSizePolicy* size_policy =
(CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
assert(size_policy->is_gc_cms_adaptive_size_policy(),
"Wrong type for size policy");
return size_policy;
}
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
size_t desired_promo_size) {
if (cur_promo_size < desired_promo_size) {
size_t expand_bytes = desired_promo_size - cur_promo_size;
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
expand_bytes);
}
expand(expand_bytes,
MinHeapDeltaBytes,
CMSExpansionCause::_adaptive_size_policy);
} else if (desired_promo_size < cur_promo_size) {
size_t shrink_bytes = cur_promo_size - desired_promo_size;
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
shrink_bytes);
}
shrink(shrink_bytes);
}
}
CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCAdaptivePolicyCounters* counters =
(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
"Wrong kind of counters");
return counters;
}
void ASConcurrentMarkSweepGeneration::update_counters() {
if (UsePerfData) {
_space_counters->update_all();
_gen_counters->update_all();
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
"Wrong gc statistics type");
counters->update_counters(gc_stats_l);
}
}
void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
if (UsePerfData) {
_space_counters->update_used(used);
_space_counters->update_capacity();
_gen_counters->update_all();
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
"Wrong gc statistics type");
counters->update_counters(gc_stats_l);
}
}
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
HeapWord* old_end = _cmsSpace->end();
HeapWord* unallocated_start = _cmsSpace->unallocated_block();
assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
FreeChunk* chunk_at_end = find_chunk_at_end();
if (chunk_at_end == NULL) {
// No room to shrink
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("No room to shrink: old_end "
PTR_FORMAT " unallocated_start " PTR_FORMAT
" chunk_at_end " PTR_FORMAT,
old_end, unallocated_start, chunk_at_end);
}
return;
} else {
// Find the chunk at the end of the space and determine
// how much it can be shrunk.
size_t shrinkable_size_in_bytes = chunk_at_end->size();
size_t aligned_shrinkable_size_in_bytes =
align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
"Inconsistent chunk at end of space");
size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
size_t word_size_before = heap_word_size(_virtual_space.committed_size());
// Shrink the underlying space
_virtual_space.shrink_by(bytes);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
" desired_bytes " SIZE_FORMAT
" shrinkable_size_in_bytes " SIZE_FORMAT
" aligned_shrinkable_size_in_bytes " SIZE_FORMAT
" bytes " SIZE_FORMAT,
desired_bytes, shrinkable_size_in_bytes,
aligned_shrinkable_size_in_bytes, bytes);
gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
" unallocated_start " SIZE_FORMAT,
old_end, unallocated_start);
}
// If the space did shrink (shrinking is not guaranteed),
// shrink the chunk at the end by the appropriate amount.
if (((HeapWord*)_virtual_space.high()) < old_end) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
// Have to remove the chunk from the dictionary because it is changing
// size and might be elsewhere in the dictionary.
// Get the chunk at end, shrink it, and put it
// back.
_cmsSpace->removeChunkFromDictionary(chunk_at_end);
size_t word_size_change = word_size_before - new_word_size;
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);
_cmsSpace->returnChunkToDictionary(chunk_at_end);
MemRegion mr(_cmsSpace->bottom(), new_word_size);
_bts->resize(new_word_size); // resize the block offset shared array
Universe::heap()->barrier_set()->resize_covered_region(mr);
_cmsSpace->assert_locked();
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
NOT_PRODUCT(_cmsSpace->dictionary()->verify());
// update the space and generation capacity counters
if (UsePerfData) {
_space_counters->update_capacity();
_gen_counters->update_all();
}
if (Verbose && PrintGCDetails) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
"Inconsistency at end of space");
assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
"Shrinking is inconsistent");
return;
}
}
// Transfer some number of overflown objects to the usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {

@ -32,6 +32,7 @@
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
@ -52,7 +53,7 @@
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.
class CMSAdaptiveSizePolicy;
class AdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
@ -1009,8 +1010,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void icms_wait(); // Called at yield points.
// Adaptive size policy
CMSAdaptiveSizePolicy* size_policy();
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
AdaptiveSizePolicy* size_policy();
static void print_on_error(outputStream* st);
@ -1150,9 +1150,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
// Adaptive size policy
CMSAdaptiveSizePolicy* size_policy();
void set_did_compact(bool v) { _did_compact = v; }
bool refs_discovery_is_atomic() const { return false; }
@ -1346,37 +1343,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void rotate_debug_collection_type();
};
class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
// Return the size policy from the heap's collector
// policy casted to CMSAdaptiveSizePolicy*.
CMSAdaptiveSizePolicy* cms_size_policy() const;
// Resize the generation based on the adaptive size
// policy.
void resize(size_t cur_promo, size_t desired_promo);
// Return the GC counters from the collector policy
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
virtual void shrink_by(size_t bytes);
public:
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice
dictionaryChoice) :
ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
use_adaptive_freelists, dictionaryChoice) {}
virtual const char* short_name() const { return "ASCMS"; }
virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
virtual void update_counters();
virtual void update_counters(size_t used);
};
//
// Closures of various sorts used by CMS to accomplish its work
//

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "memory/memRegion.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
@ -39,6 +40,7 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
@ -58,8 +60,8 @@ CMBitMapRO::CMBitMapRO(int shifter) :
_bmWordSize = 0;
}
HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
const HeapWord* limit) const {
// First we must round addr *up* to a possible object boundary.
addr = (HeapWord*)align_size_up((intptr_t)addr,
HeapWordSize << _shifter);
@ -76,8 +78,8 @@ HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
return nextAddr;
}
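The align_size_up call above rounds an address up to the next possible object boundary before the bitmap is consulted. A standalone sketch of that rounding, assuming a power-of-two alignment as HeapWordSize << _shifter always is; the function name is invented for illustration:
#include <cassert>
#include <cstdint>
// Round size up to the next multiple of alignment (power of two).
static inline intptr_t align_up_sketch(intptr_t size, intptr_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
int main() {
  assert(align_up_sketch(13, 8) == 16);   // mid-word value rounds up
  assert(align_up_sketch(16, 8) == 16);   // aligned value is unchanged
  return 0;
}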
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
HeapWord* limit) const {
HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
const HeapWord* limit) const {
size_t addrOffset = heapWordToOffset(addr);
if (limit == NULL) {
limit = _bmStartWord + _bmWordSize;
@ -1223,6 +1225,9 @@ public:
};
void ConcurrentMark::scanRootRegions() {
// Start of concurrent marking.
ClassLoaderDataGraph::clear_claimed_marks();
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
@ -1271,7 +1276,7 @@ void ConcurrentMark::markFromRoots() {
CMConcurrentMarkingTask markingTask(this, cmThread());
if (use_parallel_marking_threads()) {
_parallel_workers->set_active_workers((int)active_workers);
// Don't set _n_par_threads because it affects MT in process_strong_roots()
// Don't set _n_par_threads because it affects MT in process_roots()
// and the decisions on that MT processing are made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask);
@ -2142,23 +2147,29 @@ void ConcurrentMark::cleanup() {
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
g1h->increment_total_collections();
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
if (VerifyDuringGC) {
HandleMark hm; // handle scope
Universe::heap()->prepare_for_verify();
Universe::verify(VerifyOption_G1UsePrevMarking,
" VerifyDuringGC:(after)");
}
g1h->check_bitmaps("Cleanup End");
g1h->verify_region_sets_optional();
// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
g1h->increment_total_collections();
// Clean out dead classes and update Metaspace sizes.
ClassLoaderDataGraph::purge();
MetaspaceGC::compute_new_size();
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->trace_heap_after_concurrent_cycle();
}
@ -2445,6 +2456,26 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
_g1h->set_par_threads(0);
}
void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
}
// Helper class to get rid of some boilerplate code.
class G1RemarkGCTraceTime : public GCTraceTime {
static bool doit_and_prepend(bool doit) {
if (doit) {
gclog_or_tty->put(' ');
}
return doit;
}
public:
G1RemarkGCTraceTime(const char* title, bool doit)
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
}
};
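doit_and_prepend works because an argument routed through a static helper is evaluated before the base-class constructor runs, so the leading space is printed ahead of the trace output. A minimal standalone sketch of that initializer-list idiom; the class names here are invented:
#include <cstdio>
struct TraceTimer {
  explicit TraceTimer(bool enabled) {
    if (enabled) { std::puts("timing..."); }
  }
};
struct PrefixedTraceTimer : TraceTimer {
  // Side effect runs first, then the flag passes through unchanged.
  static bool prepend_space(bool enabled) {
    if (enabled) { std::putchar(' '); }
    return enabled;
  }
  explicit PrefixedTraceTimer(bool enabled)
    : TraceTimer(prepend_space(enabled)) {}
};
int main() {
  PrefixedTraceTimer t(true);  // prints " timing..."
  return 0;
}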
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
if (has_overflown()) {
// Skip processing the discovered references if we have
@ -2557,9 +2588,28 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
return;
}
g1h->unlink_string_and_symbol_table(&g1_is_alive,
/* process_strings */ false, // currently strings are always roots
/* process_symbols */ true);
assert(_markStack.isEmpty(), "Marking should have completed");
// Unload Klasses, String, Symbols, Code Cache, etc.
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
bool purged_classes;
{
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
}
{
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
if (G1StringDedup::is_enabled()) {
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
G1StringDedup::unlink(&g1_is_alive);
}
}
void ConcurrentMark::swapMarkBitMaps() {
@ -2568,6 +2618,57 @@ void ConcurrentMark::swapMarkBitMaps() {
_nextMarkBitMap = (CMBitMap*) temp;
}
class CMObjectClosure;
// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
CMTask* _task;
public:
void do_object(oop obj) {
_task->deal_with_reference(obj);
}
CMObjectClosure(CMTask* task) : _task(task) { }
};
class G1RemarkThreadsClosure : public ThreadClosure {
CMObjectClosure _cm_obj;
G1CMOopClosure _cm_cl;
MarkingCodeBlobClosure _code_cl;
int _thread_parity;
bool _is_par;
public:
G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
_cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
_thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
void do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
JavaThread* jt = (JavaThread*)thread;
// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
// however, oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
// live by the SATB invariant, but other oops recorded in nmethods may behave differently.
jt->nmethods_do(&_code_cl);
jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
}
} else if (thread->is_VM_thread()) {
if (thread->claim_oops_do(_is_par, _thread_parity)) {
JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
}
}
}
};
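claim_oops_do makes the thread walk idempotent across parallel workers: each remark round flips a global parity, and a worker owns a thread only if it is the one that advances the thread's stored parity to the current round's value. A standalone sketch of that claiming scheme using std::atomic; HotSpot's own implementation differs in detail:
#include <atomic>
// One instance per thread; a global round parity is flipped (1 <-> 2) at the
// start of each strong-roots round. Exactly one claim() per thread returns
// true per round, no matter how many workers call it.
struct ClaimableThread {
  std::atomic<int> claimed_parity{0};
  bool claim(int round_parity) {
    int seen = claimed_parity.load(std::memory_order_relaxed);
    if (seen == round_parity) {
      return false;  // someone already claimed this thread in this round
    }
    return claimed_parity.compare_exchange_strong(seen, round_parity);
  }
};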
class CMRemarkTask: public AbstractGangTask {
private:
ConcurrentMark* _cm;
@ -2579,6 +2680,14 @@ public:
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
{
ResourceMark rm;
HandleMark hm;
G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
Threads::threads_do(&threads_f);
}
do {
task->do_marking_step(1000000000.0 /* something very large */,
true /* do_termination */,
@ -2601,6 +2710,8 @@ void ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
g1h->ensure_parsability(false);
if (G1CollectedHeap::use_parallel_gc_threads()) {
@ -3430,20 +3541,6 @@ public:
}
};
// Closure for iterating over objects, currently only used for
// processing SATB buffers.
class CMObjectClosure : public ObjectClosure {
private:
CMTask* _task;
public:
void do_object(oop obj) {
_task->deal_with_reference(obj);
}
CMObjectClosure(CMTask* task) : _task(task) { }
};
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
@ -3908,15 +4005,6 @@ void CMTask::drain_satb_buffers() {
}
}
if (!concurrent() && !has_aborted()) {
// We should only do this during remark.
if (G1CollectedHeap::use_parallel_gc_threads()) {
satb_mq_set.par_iterate_closure_all_threads(_worker_id);
} else {
satb_mq_set.iterate_closure_all_threads();
}
}
_draining_satb_buffers = false;
assert(has_aborted() ||

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"
@ -86,19 +87,19 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
HeapWord* getNextMarkedWordAddress(HeapWord* addr,
HeapWord* limit = NULL) const;
HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
const HeapWord* limit = NULL) const;
// Return the address corresponding to the next unmarked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
HeapWord* limit = NULL) const;
HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
const HeapWord* limit = NULL) const;
// conversion utilities
HeapWord* offsetToHeapWord(size_t offset) const {
return _bmStartWord + (offset << _shifter);
}
size_t heapWordToOffset(HeapWord* addr) const {
size_t heapWordToOffset(const HeapWord* addr) const {
return pointer_delta(addr, _bmStartWord) >> _shifter;
}
int heapWordDiffToOffsetDiff(size_t diff) const;
@ -476,6 +477,7 @@ protected:
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;
void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
void weakRefsWork(bool clear_all_soft_refs);
void swapMarkBitMaps();

@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
size_t word_size,

@ -426,7 +426,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
n += block_size(q);
}
assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
// [q, n) is the block that crosses the boundary.

@ -26,7 +26,8 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "memory/space.hpp"
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@ -112,7 +113,7 @@ forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
q = n;
oop obj = oop(q);
if (obj->klass_or_null() == NULL) return q;
n += obj->size();
n += block_size(q);
}
assert(q <= n, "wrong order for q and addr");
assert(addr < n, "wrong order for addr and n");

@ -30,23 +30,52 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
_top = bottom();
}
void G1CodeRootChunk::reset() {
_next = _prev = NULL;
_free = NULL;
_top = bottom();
}
void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
nmethod** cur = bottom();
NmethodOrLink* cur = bottom();
while (cur != _top) {
cl->do_code_blob(*cur);
if (is_nmethod(cur)) {
cl->do_code_blob(cur->_nmethod);
}
cur++;
}
}
bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
if (cur->_nmethod == method) {
bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
if (!result) {
// Someone else cleared out this entry.
return false;
}
// The method was cleared. Time to link it into the free list.
NmethodOrLink* prev_free;
do {
prev_free = (NmethodOrLink*)_free;
cur->_link = prev_free;
} while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
return true;
}
}
return false;
}
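remove_lock_free combines two lock-free steps: a single CAS clears the slot, so exactly one racing remover wins, and a retry loop then pushes the emptied slot onto the chunk's internal free list (a Treiber-stack push). A standalone sketch of the same pattern in portable C++, with hypothetical types in place of HotSpot's Atomic::cmpxchg_ptr:

#include <atomic>

struct Slot {
  std::atomic<void*> payload;  // an nmethod* in the real structure
  Slot*              link;     // free-list link, only written once cleared
};

// Returns true iff this caller is the one that removed 'expected'.
bool remove_lock_free(Slot* slot, void* expected, std::atomic<Slot*>& free_head) {
  // Step 1: exactly one thread clears the slot.
  if (!slot->payload.compare_exchange_strong(expected, nullptr)) {
    return false;  // someone else cleared out this entry
  }
  // Step 2: Treiber push of the emptied slot onto the free list.
  Slot* prev = free_head.load();
  do {
    slot->link = prev;  // compare_exchange_weak refreshes 'prev' on failure
  } while (!free_head.compare_exchange_weak(prev, slot));
  return true;
}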
G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
_free_list.initialize();
_free_list.set_size(G1CodeRootChunk::word_size());
@ -140,34 +169,43 @@ G1CodeRootSet::~G1CodeRootSet() {
void G1CodeRootSet::add(nmethod* method) {
if (!contains(method)) {
// Try to add the nmethod. If there is not enough space, get a new chunk.
if (_list.head() == NULL || _list.head()->is_full()) {
G1CodeRootChunk* cur = new_chunk();
// Find the first chunk that isn't full.
G1CodeRootChunk* cur = _list.head();
while (cur != NULL) {
if (!cur->is_full()) {
break;
}
cur = cur->next();
}
// All chunks are full, get a new chunk.
if (cur == NULL) {
cur = new_chunk();
_list.return_chunk_at_head(cur);
}
bool result = _list.head()->add(method);
// Add the nmethod.
bool result = cur->add(method);
guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to chunk.", method));
_length++;
}
}
void G1CodeRootSet::remove(nmethod* method) {
void G1CodeRootSet::remove_lock_free(nmethod* method) {
G1CodeRootChunk* found = find(method);
if (found != NULL) {
bool result = found->remove(method);
guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
// eventually free completely emptied chunk
if (found->is_empty()) {
_list.remove_chunk(found);
free(found);
bool result = found->remove_lock_free(method);
if (result) {
Atomic::dec_ptr((volatile intptr_t*)&_length);
}
_length--;
}
assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
}
nmethod* G1CodeRootSet::pop() {
do {
while (true) {
G1CodeRootChunk* cur = _list.head();
if (cur == NULL) {
assert(_length == 0, "when there are no chunks, there should be no elements");
@ -180,7 +218,7 @@ nmethod* G1CodeRootSet::pop() {
} else {
free(_list.get_chunk_at_head());
}
} while (true);
}
}
G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {

@ -31,6 +31,14 @@
class CodeBlobClosure;
// The elements of the G1CodeRootChunk are either:
// 1) nmethod pointers
// 2) nodes in an internally chained free list
typedef union {
nmethod* _nmethod;
void* _link;
} NmethodOrLink;
class G1CodeRootChunk : public CHeapObj<mtGC> {
private:
static const int NUM_ENTRIES = 32;
@ -38,16 +46,28 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
G1CodeRootChunk* _next;
G1CodeRootChunk* _prev;
nmethod** _top;
NmethodOrLink* _top;
// First free position within the chunk.
volatile NmethodOrLink* _free;
nmethod* _data[NUM_ENTRIES];
NmethodOrLink _data[NUM_ENTRIES];
nmethod** bottom() const {
return (nmethod**) &(_data[0]);
NmethodOrLink* bottom() const {
return (NmethodOrLink*) &(_data[0]);
}
nmethod** end() const {
return (nmethod**) &(_data[NUM_ENTRIES]);
NmethodOrLink* end() const {
return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
}
bool is_link(NmethodOrLink* nmethod_or_link) {
return nmethod_or_link->_link == NULL ||
(bottom() <= nmethod_or_link->_link
&& nmethod_or_link->_link < end());
}
bool is_nmethod(NmethodOrLink* nmethod_or_link) {
return !is_link(nmethod_or_link);
}
public:
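The NmethodOrLink union carries no explicit tag; is_link and is_nmethod above discriminate purely by value: NULL, or a pointer back into the chunk's own _data array, can only be a free-list link, while any other pointer must be an nmethod. A hedged, stand-alone sketch of that address-range test:

#include <cstdint>

// Valid because free-list links only ever point at entries of the same
// chunk, and an nmethod can never live inside the chunk's own storage.
// All parameter names here are illustrative.
bool is_link(const void* value, const void* chunk_bottom, const void* chunk_end) {
  uintptr_t v = (uintptr_t)value;
  return value == nullptr ||
         ((uintptr_t)chunk_bottom <= v && v < (uintptr_t)chunk_end);
}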
@ -85,46 +105,55 @@ class G1CodeRootChunk : public CHeapObj<mtGC> {
}
bool is_full() const {
return _top == (nmethod**)end();
return _top == end() && _free == NULL;
}
bool contains(nmethod* method) {
nmethod** cur = bottom();
NmethodOrLink* cur = bottom();
while (cur != _top) {
if (*cur == method) return true;
if (cur->_nmethod == method) return true;
cur++;
}
return false;
}
bool add(nmethod* method) {
if (is_full()) return false;
*_top = method;
_top++;
if (is_full()) {
return false;
}
if (_free != NULL) {
// Take from internally chained free list
NmethodOrLink* first_free = (NmethodOrLink*)_free;
_free = (NmethodOrLink*)_free->_link;
first_free->_nmethod = method;
} else {
// Take from top.
_top->_nmethod = method;
_top++;
}
return true;
}
bool remove(nmethod* method) {
nmethod** cur = bottom();
while (cur != _top) {
if (*cur == method) {
memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
_top--;
return true;
}
cur++;
}
return false;
}
bool remove_lock_free(nmethod* method);
void nmethods_do(CodeBlobClosure* blk);
nmethod* pop() {
if (is_empty()) {
return NULL;
if (_free != NULL) {
// Kill the free list.
_free = NULL;
}
_top--;
return *_top;
while (!is_empty()) {
_top--;
if (is_nmethod(_top)) {
return _top->_nmethod;
}
}
return NULL;
}
};
@ -193,7 +222,7 @@ class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
// method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
void remove(nmethod* method);
void remove_lock_free(nmethod* method);
nmethod* pop();
bool contains(nmethod* method);

File diff suppressed because it is too large

@ -31,7 +31,6 @@
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
@ -211,6 +210,7 @@ class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
@ -220,7 +220,7 @@ class G1CollectedHeap : public SharedHeap {
friend class OldGCAllocRegion;
// Closures used in implementation.
template <G1Barrier barrier, bool do_mark_object>
template <G1Barrier barrier, G1Mark do_mark_object>
friend class G1ParCopyClosure;
friend class G1IsAliveClosure;
friend class G1EvacuateFollowersClosure;
@ -347,6 +347,9 @@ private:
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// Set up the retained old gc alloc region as the current old gc alloc region.
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
@ -828,12 +831,13 @@ protected:
// param is for use with parallel roots processing, and should be
// the "i" of the calling parallel worker thread's work(i) function.
// In the sequential case this param will be ignored.
void g1_process_strong_roots(bool is_scavenging,
ScanningOption so,
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
G1KlassScanClosure* scan_klasses,
uint worker_i);
void g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
OopsInHeapRegionClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
uint worker_i);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
@ -1026,7 +1030,7 @@ protected:
// of G1CollectedHeap::_gc_time_stamp.
unsigned int* _worker_cset_start_region_time_stamp;
enum G1H_process_strong_roots_tasks {
enum G1H_process_roots_tasks {
G1H_PS_filter_satb_buffers,
G1H_PS_refProcessor_oops_do,
// Leave this one last.
@ -1608,10 +1612,6 @@ public:
// Free up superfluous code root memory.
void purge_code_root_memory();
// During an initial mark pause, mark all the code roots that
// point into regions *not* in the collection set.
void mark_strong_code_roots(uint worker_id);
// Rebuild the strong code root lists for each region
// after a full GC.
void rebuild_strong_code_roots();
@ -1620,6 +1620,9 @@ public:
// in symbol table, possibly in parallel.
void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
// Parallel phase of unloading/cleaning after G1 concurrent mark.
void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
// Redirty logged cards in the refinement queue.
void redirty_logged_cards();
// Verification
@ -1715,256 +1718,4 @@ public:
}
};
class G1ParScanThreadState : public StackObj {
protected:
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
G1ParScanClosure _scanner;
size_t _alloc_buffer_waste;
size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
int _hash_seed;
uint _queue_num;
size_t _term_attempts;
double _start;
double _start_strong_roots;
double _strong_roots_time;
double _start_term;
double _term_time;
// Map from young-age-index (0 == not young, 1 is youngest) to
// surviving words. base is what we get back from the malloc call
size_t* _surviving_young_words_base;
// this points into the array, as we use the first few entries for padding
size_t* _surviving_young_words;
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
// If the new value of the field points to the same region or
// is the to-space, we don't need to include it in the Rset updates.
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
size_t card_index = ctbs()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (ctbs()->mark_card_deferred(card_index)) {
dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
}
}
}
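deferred_rs_update above filters out references that need no remembered-set entry (same region, or into to-space), then uses mark_card_deferred as a claim so each dirty card is enqueued at most once. A small, self-contained model of that filter-then-enqueue shape, with hypothetical, simplified card-table and queue types:

#include <cstdint>
#include <queue>

// Hypothetical model: one mark byte per 512-byte card, a queue of card indices.
struct MiniCardTable {
  uint8_t marks[1024] = {};
  size_t index_for(uintptr_t field_addr) { return (field_addr >> 9) % 1024; }
  bool mark_card_deferred(size_t i) {        // claim: only the first caller wins
    if (marks[i] != 0) return false;
    marks[i] = 1;
    return true;
  }
};

void deferred_rs_update(MiniCardTable& ct, std::queue<size_t>& dirty_card_queue,
                        bool same_region, bool to_survivor, uintptr_t field) {
  if (same_region || to_survivor) return;    // no remembered-set entry needed
  size_t card = ct.index_for(field);
  if (ct.mark_card_deferred(card)) {         // enqueue each card at most once
    dirty_card_queue.push(card);
  }
}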
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState() {
retire_alloc_buffers();
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
RefToScanQueue* refs() { return _refs; }
ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return _alloc_buffers[purpose];
}
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
size_t undo_waste() const { return _undo_waste; }
#ifdef ASSERT
bool verify_ref(narrowOop* ref) const;
bool verify_ref(oop* ref) const;
bool verify_task(StarTask ref) const;
#endif // ASSERT
template <class T> void push_on_queue(T* ref) {
assert(verify_ref(ref), "sanity");
refs()->push(ref);
}
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
}
return obj;
}
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
if (obj != NULL) return obj;
return allocate_slow(purpose, word_sz);
}
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
if (alloc_buffer(purpose)->contains(obj)) {
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
}
OopsInHeapRegionClosure* evac_failure_closure() {
return _evac_failure_cl;
}
int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
size_t term_attempts() const { return _term_attempts; }
void note_term_attempt() { _term_attempts++; }
void start_strong_roots() {
_start_strong_roots = os::elapsedTime();
}
void end_strong_roots() {
_strong_roots_time += (os::elapsedTime() - _start_strong_roots);
}
double strong_roots_time() const { return _strong_roots_time; }
void start_term_time() {
note_term_attempt();
_start_term = os::elapsedTime();
}
void end_term_time() {
_term_time += (os::elapsedTime() - _start_term);
}
double term_time() const { return _term_time; }
double elapsed_time() const {
return os::elapsedTime() - _start;
}
static void
print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
void
print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
size_t* surviving_young_words() {
// We add one to hide entry 0, which accumulates surviving words for
// age -1 regions (i.e. non-young ones)
return _surviving_young_words;
}
private:
void retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}
// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) const {
assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
return false;
}
// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oop, to allow the
// specialization for has_partial_array_mask() for narrowOops above.
// This means that unintentional use of this method with narrowOops is caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) const {
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
inline oop clear_partial_array_mask(oop* ref) const {
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
inline void do_oop_partial_array(oop* p);
// This method is applied to the fields of the objects that have just been copied.
template <class T> void do_oop_evac(T* p, HeapRegion* from) {
assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
"Reference should not be NULL here as such are never pushed to the task queue.");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
// Although we never intentionally push references outside of the collection
// set, due to (benign) races in the claim mechanism during RSet scanning more
// than one thread might claim the same card, so the same card may be
// processed multiple times. Hence we redo this check.
if (_g1h->in_cset_fast_test(obj)) {
oop forwardee;
if (obj->is_forwarded()) {
forwardee = obj->forwardee();
} else {
forwardee = copy_to_survivor_space(obj);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
}
assert(obj != NULL, "Must be");
update_rs(from, p, queue_num());
}
public:
oop copy_to_survivor_space(oop const obj);
template <class T> inline void deal_with_reference(T* ref_to_scan);
inline void deal_with_reference(StarTask ref);
public:
void trim_queue();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

@ -29,7 +29,6 @@
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@ -289,89 +288,4 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj));
}
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {
_g1_rem->par_write_ref(from, p, tid);
}
}
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
} else {
immediate_rs_update(from, p, tid);
}
}
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
assert(from_obj->is_objArray(), "must be obj array");
objArrayOop from_obj_array = objArrayOop(from_obj);
// The from-space object contains the real length.
int length = from_obj_array->length();
assert(from_obj->is_forwarded(), "must be forwarded");
oop to_obj = from_obj->forwardee();
assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
objArrayOop to_obj_array = objArrayOop(to_obj);
// We keep track of the next start index in the length field of the
// to-space object.
int next_index = to_obj_array->length();
assert(0 <= next_index && next_index < length,
err_msg("invariant, next index: %d, length: %d", next_index, length));
int start = next_index;
int end = length;
int remainder = end - start;
// We'll try not to push a range that's smaller than ParGCArrayScanChunk.
if (remainder > 2 * ParGCArrayScanChunk) {
end = start + ParGCArrayScanChunk;
to_obj_array->set_length(end);
// Push the remainder before we process the range in case another
// worker has run out of things to do and can steal it.
oop* from_obj_p = set_partial_array_mask(from_obj);
push_on_queue(from_obj_p);
} else {
assert(length == end, "sanity");
// We'll process the final range for this object. Restore the length
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
// correct given that we are using it to keep track of the next
// start index. oop_iterate_range() (thankfully!) ignores the length
// field and only relies on the start / end parameters. It does
// however return the size of the object which will be incorrect. So
// we have to ignore it even if we wanted to use it.
to_obj_array->oop_iterate_range(&_scanner, start, end);
}
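Two techniques cooperate in do_oop_partial_array: task-queue entries for partially scanned arrays are tagged by OR-ing a low bit into the oop* (legal because such pointers are at least 4-byte aligned), and the to-space array's length field temporarily stores the next chunk's start index. A compact sketch of the tagging half, with illustrative names:

#include <cstdint>

// The low bits of an aligned pointer are always zero, so bit 0x2 can carry
// an in-band "this is a partial-array task" flag without extra storage.
static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

inline void* set_partial_array_mask(void* obj) {
  return (void*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}
inline bool has_partial_array_mask(void* ref) {
  return ((uintptr_t)ref & PARTIAL_ARRAY_MASK) != 0;
}
inline void* clear_partial_array_mask(void* ref) {
  return (void*)((uintptr_t)ref & ~PARTIAL_ARRAY_MASK);
}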
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);
}
}
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

@ -71,6 +71,9 @@ private:
bool _during_initial_mark;
bool _during_conc_mark;
uint _worker_id;
HeapWord* _end_of_last_gap;
HeapWord* _last_gap_threshold;
HeapWord* _last_obj_threshold;
public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
@ -83,7 +86,10 @@ public:
_update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark),
_worker_id(worker_id) { }
_worker_id(worker_id),
_end_of_last_gap(hr->bottom()),
_last_gap_threshold(hr->bottom()),
_last_obj_threshold(hr->bottom()) { }
size_t marked_bytes() { return _marked_bytes; }
@ -107,7 +113,12 @@ public:
HeapWord* obj_addr = (HeapWord*) obj;
assert(_hr->is_in(obj_addr), "sanity");
size_t obj_size = obj->size();
_hr->update_bot_for_object(obj_addr, obj_size);
HeapWord* obj_end = obj_addr + obj_size;
if (_end_of_last_gap != obj_addr) {
// there was a gap before obj_addr
_last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
}
if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.
@ -115,7 +126,9 @@ public:
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
_cm->markPrev(obj);
if (!_cm->isPrevMarked(obj)) {
_cm->markPrev(obj);
}
if (_during_initial_mark) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
@ -145,13 +158,18 @@ public:
// remembered set entries missing given that we skipped cards on
// the collection set. So, we'll recreate such entries now.
obj->oop_iterate(_update_rset_cl);
assert(_cm->isPrevMarked(obj), "Should be marked!");
} else {
// The object has been either evacuated or is dead. Fill it with a
// dummy object.
MemRegion mr((HeapWord*) obj, obj_size);
MemRegion mr(obj_addr, obj_size);
CollectedHeap::fill_with_object(mr);
// must nuke all dead objects which we skipped when iterating over the region
_cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
}
_end_of_last_gap = obj_end;
_last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
};
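The _end_of_last_gap bookkeeping lets the closure clear the prev bitmap only over the runs of dead objects it actually skips, instead of clearing the whole region up front (that wholesale clear is removed just below). A schematic, simplified model of the idea, assuming objects are visited in address order:

// Illustrative types only; the real code clears concurrent-mark bitmap bits
// and also updates block-offset-table thresholds at the same boundaries.
struct Bitmap {
  void clear_range(char*, char*) { /* clear the mark bits in this range */ }
};

void on_object(Bitmap& prev_bitmap, char*& end_of_last_gap,
               char* obj_addr, char* obj_end) {
  if (end_of_last_gap != obj_addr) {
    // [end_of_last_gap, obj_addr) contained only dead objects.
    prev_bitmap.clear_range(end_of_last_gap, obj_addr);
  }
  end_of_last_gap = obj_end;
}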
@ -182,13 +200,6 @@ public:
during_conc_mark,
_worker_id);
MemRegion mr(hr->bottom(), hr->end());
// We'll recreate the prev marking info so we'll first clear
// the prev bitmap range for this region. We never mark any
// CSet objects explicitly so the next bitmap range should be
// cleared anyway.
_cm->clearRangePrevBitmap(mr);
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
_g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

@ -167,7 +167,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_last_update_rs_processed_buffers(_max_gc_threads, "%d"),
_last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
_last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
_last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_times_ms(_max_gc_threads, "%.1lf"),
_last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
@ -194,7 +193,6 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
_last_update_rs_processed_buffers.reset();
_last_scan_rs_times_ms.reset();
_last_strong_code_root_scan_times_ms.reset();
_last_strong_code_root_mark_times_ms.reset();
_last_obj_copy_times_ms.reset();
_last_termination_times_ms.reset();
_last_termination_attempts.reset();
@ -215,7 +213,6 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_processed_buffers.verify();
_last_scan_rs_times_ms.verify();
_last_strong_code_root_scan_times_ms.verify();
_last_strong_code_root_mark_times_ms.verify();
_last_obj_copy_times_ms.verify();
_last_termination_times_ms.verify();
_last_termination_attempts.verify();
@ -230,7 +227,6 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_times_ms.get(i) +
_last_scan_rs_times_ms.get(i) +
_last_strong_code_root_scan_times_ms.get(i) +
_last_strong_code_root_mark_times_ms.get(i) +
_last_obj_copy_times_ms.get(i) +
_last_termination_times_ms.get(i);
@ -302,9 +298,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
}
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
_last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
}
_last_update_rs_times_ms.print(2, "Update RS (ms)");
_last_update_rs_processed_buffers.print(3, "Processed Buffers");
_last_scan_rs_times_ms.print(2, "Scan RS (ms)");
@ -322,9 +315,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if (_last_satb_filtering_times_ms.sum() > 0.0) {
_last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
}
if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
_last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
}
_last_update_rs_times_ms.print(1, "Update RS (ms)");
_last_update_rs_processed_buffers.print(2, "Processed Buffers");
_last_scan_rs_times_ms.print(1, "Scan RS (ms)");

@ -120,7 +120,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
WorkerDataArray<int> _last_update_rs_processed_buffers;
WorkerDataArray<double> _last_scan_rs_times_ms;
WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
WorkerDataArray<double> _last_obj_copy_times_ms;
WorkerDataArray<double> _last_termination_times_ms;
WorkerDataArray<size_t> _last_termination_attempts;
@ -199,10 +198,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_last_strong_code_root_scan_times_ms.set(worker_i, ms);
}
void record_strong_code_root_mark_time(uint worker_i, double ms) {
_last_strong_code_root_mark_times_ms.set(worker_i, ms);
}
void record_obj_copy_time(uint worker_i, double ms) {
_last_obj_copy_times_ms.set(worker_i, ms);
}
@ -369,10 +364,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _last_strong_code_root_scan_times_ms.average();
}
double average_last_strong_code_root_mark_time(){
return _last_strong_code_root_mark_times_ms.average();
}
double average_last_obj_copy_time() {
return _last_obj_copy_times_ms.average();
}

@ -129,13 +129,15 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
SharedHeap* sh = SharedHeap::heap();
// Need cleared claim bits for the strong roots processing
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_SystemClasses,
MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_None,
&GenMarkSweep::follow_root_closure,
&GenMarkSweep::follow_klass_closure);
&GenMarkSweep::follow_cld_closure,
&follow_code_closure);
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
@ -304,13 +306,15 @@ void G1MarkSweep::mark_sweep_phase3() {
SharedHeap* sh = SharedHeap::heap();
// Need cleared claim bits for the strong roots processing
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
sh->process_strong_roots(true, // activate StrongRootsScope
SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
&GenMarkSweep::adjust_pointer_closure,
&GenMarkSweep::adjust_klass_closure);
CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
sh->process_all_roots(true, // activate StrongRootsScope
SharedHeap::SO_AllCodeCache,
&GenMarkSweep::adjust_pointer_closure,
&GenMarkSweep::adjust_cld_closure,
&adjust_code_closure);
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

@ -25,7 +25,28 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
_cm(_g1->concurrent_mark()) {}
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
_g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
_g1(g1), _par_scan_state(NULL),
_worker_id(UINT_MAX) {
set_par_scan_thread_state(par_scan_state);
}
void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
_par_scan_state = par_scan_state;
_worker_id = par_scan_state->queue_num();
assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
}

Some files were not shown because too many files have changed in this diff