Merge
This commit is contained in:
commit
b73681ae0c
hotspot
agent/src/os/solaris/proc
make
src
os
os_cpu/linux_x86/vm
share/vm
gc_implementation
g1
concurrentMark.cppconcurrentMark.hppg1CollectedHeap.cppg1CollectedHeap.hppg1RemSet.cppg1_globals.hppheapRegion.cppheapRegion.hpp
parallelScavenge
interpreter
memory
allocation.cppallocation.hppcardTableModRefBS.cppcardTableModRefBS.hppcollectorPolicy.cppmodRefBarrierSet.hpp
prims
runtime
utilities
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -420,7 +420,22 @@ extern uintptr_t Ppltdest(struct ps_prochandle *, uintptr_t, int *);
|
||||
/*
|
||||
* Stack frame iteration interface.
|
||||
*/
|
||||
#ifdef SOLARIS_11_B159_OR_LATER
|
||||
/* building on Nevada-B159 or later so define the new callback */
|
||||
typedef int proc_stack_f(
|
||||
void *, /* the cookie given to Pstack_iter() */
|
||||
const prgregset_t, /* the frame's registers */
|
||||
uint_t, /* argc for the frame's function */
|
||||
const long *, /* argv for the frame's function */
|
||||
int, /* bitwise flags describing the frame (see below) */
|
||||
int); /* a signal number */
|
||||
|
||||
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
|
||||
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
|
||||
#else
|
||||
/* building on Nevada-B158 or earlier so define the old callback */
|
||||
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
|
||||
#endif
|
||||
|
||||
extern int Pstack_iter(struct ps_prochandle *,
|
||||
const prgregset_t, proc_stack_f *, void *);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -101,7 +101,23 @@ extern int Pstop(struct ps_prochandle *, uint_t);
|
||||
/*
|
||||
* Stack frame iteration interface.
|
||||
*/
|
||||
#ifdef SOLARIS_11_B159_OR_LATER
|
||||
/* building on Nevada-B159 or later so define the new callback */
|
||||
typedef int proc_stack_f(
|
||||
void *, /* the cookie given to Pstack_iter() */
|
||||
const prgregset_t, /* the frame's registers */
|
||||
uint_t, /* argc for the frame's function */
|
||||
const long *, /* argv for the frame's function */
|
||||
int, /* bitwise flags describing the frame (see below) */
|
||||
int); /* a signal number */
|
||||
|
||||
#define PR_SIGNAL_FRAME 1 /* called by a signal handler */
|
||||
#define PR_FOUND_SIGNAL 2 /* we found the corresponding signal number */
|
||||
#else
|
||||
/* building on Nevada-B158 or earlier so define the old callback */
|
||||
typedef int proc_stack_f(void *, const prgregset_t, uint_t, const long *);
|
||||
#endif
|
||||
|
||||
extern int Pstack_iter(struct ps_prochandle *,
|
||||
const prgregset_t, proc_stack_f *, void *);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,6 +24,9 @@
|
||||
|
||||
#include "salibproc.h"
|
||||
#include "sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal.h"
|
||||
#ifndef SOLARIS_11_B159_OR_LATER
|
||||
#include <sys/utsname.h>
|
||||
#endif
|
||||
#include <thread_db.h>
|
||||
#include <strings.h>
|
||||
#include <limits.h>
|
||||
@ -40,8 +43,22 @@
|
||||
#define SYMBOL_BUF_SIZE 256
|
||||
#define ERR_MSG_SIZE (PATH_MAX + 256)
|
||||
|
||||
// debug mode
|
||||
// debug modes
|
||||
static int _libsaproc_debug = 0;
|
||||
#ifndef SOLARIS_11_B159_OR_LATER
|
||||
static bool _Pstack_iter_debug = false;
|
||||
|
||||
static void dprintf_2(const char* format,...) {
|
||||
if (_Pstack_iter_debug) {
|
||||
va_list alist;
|
||||
|
||||
va_start(alist, format);
|
||||
fputs("Pstack_iter DEBUG: ", stderr);
|
||||
vfprintf(stderr, format, alist);
|
||||
va_end(alist);
|
||||
}
|
||||
}
|
||||
#endif // !SOLARIS_11_B159_OR_LATER
|
||||
|
||||
static void print_debug(const char* format,...) {
|
||||
if (_libsaproc_debug) {
|
||||
@ -450,6 +467,7 @@ fill_load_object_list(void *cd, const prmap_t* pmp, const char* obj_name) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Pstack_iter() proc_stack_f callback prior to Nevada-B159
|
||||
static int
|
||||
fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv) {
|
||||
DebuggerWith2Objects* dbgo2 = (DebuggerWith2Objects*) cd;
|
||||
@ -472,6 +490,14 @@ fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc, const long *argv
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Pstack_iter() proc_stack_f callback in Nevada-B159 or later
|
||||
/*ARGSUSED*/
|
||||
static int
|
||||
wrapper_fill_cframe_list(void *cd, const prgregset_t regs, uint_t argc,
|
||||
const long *argv, int frame_flags, int sig) {
|
||||
return(fill_cframe_list(cd, regs, argc, argv));
|
||||
}
|
||||
|
||||
// part of the class sharing workaround
|
||||
|
||||
// FIXME: !!HACK ALERT!!
|
||||
@ -970,6 +996,11 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_fill
|
||||
TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
|
||||
}
|
||||
|
||||
#ifndef SOLARIS_11_B159_OR_LATER
|
||||
// building on Nevada-B158 or earlier so more hoops to jump through
|
||||
static bool has_newer_Pstack_iter = false; // older version by default
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
|
||||
* Method: fillCFrameList0
|
||||
@ -997,7 +1028,24 @@ JNIEXPORT jobject JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_f
|
||||
|
||||
env->ReleaseLongArrayElements(regsArray, ptr, JNI_ABORT);
|
||||
CHECK_EXCEPTION_(0);
|
||||
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs, fill_cframe_list, &dbgo2);
|
||||
|
||||
#ifdef SOLARIS_11_B159_OR_LATER
|
||||
// building on Nevada-B159 or later so use the new callback
|
||||
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
|
||||
wrapper_fill_cframe_list, &dbgo2);
|
||||
#else
|
||||
// building on Nevada-B158 or earlier so figure out which callback to use
|
||||
|
||||
if (has_newer_Pstack_iter) {
|
||||
// Since we're building on Nevada-B158 or earlier, we have to
|
||||
// cast wrapper_fill_cframe_list to make the compiler happy.
|
||||
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
|
||||
(proc_stack_f *)wrapper_fill_cframe_list, &dbgo2);
|
||||
} else {
|
||||
Pstack_iter((struct ps_prochandle*) p_ps_prochandle, gregs,
|
||||
fill_cframe_list, &dbgo2);
|
||||
}
|
||||
#endif // SOLARIS_11_B159_OR_LATER
|
||||
return dbgo2.obj;
|
||||
}
|
||||
|
||||
@ -1218,6 +1266,102 @@ JNIEXPORT jstring JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_d
|
||||
return res;
|
||||
}
|
||||
|
||||
#ifndef SOLARIS_11_B159_OR_LATER
|
||||
// Determine if the OS we're running on has the newer version
|
||||
// of libproc's Pstack_iter.
|
||||
//
|
||||
// Set env var PSTACK_ITER_DEBUG=true to debug this logic.
|
||||
// Set env var PSTACK_ITER_DEBUG_RELEASE to simulate a 'release' value.
|
||||
// Set env var PSTACK_ITER_DEBUG_VERSION to simulate a 'version' value.
|
||||
//
|
||||
// frankenputer 'uname -r -v': 5.10 Generic_141445-09
|
||||
// jurassic 'uname -r -v': 5.11 snv_164
|
||||
// lonepeak 'uname -r -v': 5.11 snv_127
|
||||
//
|
||||
static void set_has_newer_Pstack_iter(JNIEnv *env) {
|
||||
static bool done_set = false;
|
||||
|
||||
if (done_set) {
|
||||
// already set has_newer_Pstack_iter
|
||||
return;
|
||||
}
|
||||
|
||||
struct utsname name;
|
||||
if (uname(&name) == -1) {
|
||||
THROW_NEW_DEBUGGER_EXCEPTION("uname() failed!");
|
||||
}
|
||||
dprintf_2("release='%s' version='%s'\n", name.release, name.version);
|
||||
|
||||
if (_Pstack_iter_debug) {
|
||||
char *override = getenv("PSTACK_ITER_DEBUG_RELEASE");
|
||||
if (override != NULL) {
|
||||
strncpy(name.release, override, SYS_NMLN - 1);
|
||||
name.release[SYS_NMLN - 2] = '\0';
|
||||
dprintf_2("overriding with release='%s'\n", name.release);
|
||||
}
|
||||
override = getenv("PSTACK_ITER_DEBUG_VERSION");
|
||||
if (override != NULL) {
|
||||
strncpy(name.version, override, SYS_NMLN - 1);
|
||||
name.version[SYS_NMLN - 2] = '\0';
|
||||
dprintf_2("overriding with version='%s'\n", name.version);
|
||||
}
|
||||
}
|
||||
|
||||
// the major number corresponds to the old SunOS major number
|
||||
int major = atoi(name.release);
|
||||
if (major >= 6) {
|
||||
dprintf_2("release is SunOS 6 or later\n");
|
||||
has_newer_Pstack_iter = true;
|
||||
done_set = true;
|
||||
return;
|
||||
}
|
||||
if (major < 5) {
|
||||
dprintf_2("release is SunOS 4 or earlier\n");
|
||||
done_set = true;
|
||||
return;
|
||||
}
|
||||
|
||||
// some SunOS 5.* build so now check for Solaris versions
|
||||
char *dot = strchr(name.release, '.');
|
||||
int minor = 0;
|
||||
if (dot != NULL) {
|
||||
// release is major.minor format
|
||||
*dot = NULL;
|
||||
minor = atoi(dot + 1);
|
||||
}
|
||||
|
||||
if (minor <= 10) {
|
||||
dprintf_2("release is Solaris 10 or earlier\n");
|
||||
done_set = true;
|
||||
return;
|
||||
} else if (minor >= 12) {
|
||||
dprintf_2("release is Solaris 12 or later\n");
|
||||
has_newer_Pstack_iter = true;
|
||||
done_set = true;
|
||||
return;
|
||||
}
|
||||
|
||||
// some Solaris 11 build so now check for internal build numbers
|
||||
if (strncmp(name.version, "snv_", 4) != 0) {
|
||||
dprintf_2("release is Solaris 11 post-GA or later\n");
|
||||
has_newer_Pstack_iter = true;
|
||||
done_set = true;
|
||||
return;
|
||||
}
|
||||
|
||||
// version begins with "snv_" so a pre-GA build of Solaris 11
|
||||
int build = atoi(&name.version[4]);
|
||||
if (build >= 159) {
|
||||
dprintf_2("release is Nevada-B159 or later\n");
|
||||
has_newer_Pstack_iter = true;
|
||||
} else {
|
||||
dprintf_2("release is Nevada-B158 or earlier\n");
|
||||
}
|
||||
|
||||
done_set = true;
|
||||
}
|
||||
#endif // !SOLARIS_11_B159_OR_LATER
|
||||
|
||||
/*
|
||||
* Class: sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal
|
||||
* Method: initIDs
|
||||
@ -1237,6 +1381,14 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_proc_ProcDebuggerLocal_init
|
||||
if (libproc_handle == 0)
|
||||
THROW_NEW_DEBUGGER_EXCEPTION("can't load libproc.so, if you are using Solaris 5.7 or below, copy libproc.so from 5.8!");
|
||||
|
||||
#ifndef SOLARIS_11_B159_OR_LATER
|
||||
_Pstack_iter_debug = getenv("PSTACK_ITER_DEBUG") != NULL;
|
||||
|
||||
set_has_newer_Pstack_iter(env);
|
||||
CHECK_EXCEPTION;
|
||||
dprintf_2("has_newer_Pstack_iter=%d\n", has_newer_Pstack_iter);
|
||||
#endif
|
||||
|
||||
p_ps_prochandle_ID = env->GetFieldID(clazz, "p_ps_prochandle", "J");
|
||||
CHECK_EXCEPTION;
|
||||
|
||||
|
@ -24,7 +24,8 @@
|
||||
|
||||
# This file defines variables and macros which are used in the makefiles to
|
||||
# allow distributions to augment or replace common hotspot code with
|
||||
# distribution-specific source files.
|
||||
# distribution-specific source files. This capability is disabled when
|
||||
# an OPENJDK build is requested, unless HS_ALT_SRC_REL has been set externally.
|
||||
|
||||
# Requires: GAMMADIR
|
||||
# Provides:
|
||||
@ -33,14 +34,17 @@
|
||||
|
||||
HS_COMMON_SRC_REL=src
|
||||
|
||||
# This needs to be changed to a more generic location, but we keep it as this
|
||||
# for now for compatibility
|
||||
HS_ALT_SRC_REL=src/closed
|
||||
ifneq ($(OPENJDK),true)
|
||||
# This needs to be changed to a more generic location, but we keep it
|
||||
# as this for now for compatibility
|
||||
HS_ALT_SRC_REL=src/closed
|
||||
else
|
||||
HS_ALT_SRC_REL=NO_SUCH_PATH
|
||||
endif
|
||||
|
||||
HS_COMMON_SRC=$(GAMMADIR)/$(HS_COMMON_SRC_REL)
|
||||
HS_ALT_SRC=$(GAMMADIR)/$(HS_ALT_SRC_REL)
|
||||
|
||||
|
||||
## altsrc-equiv
|
||||
#
|
||||
# Convert a common source path to an alternative source path
|
||||
|
32
hotspot/make/jdk6_hotspot_distro
Normal file
32
hotspot/make/jdk6_hotspot_distro
Normal file
@ -0,0 +1,32 @@
|
||||
#
|
||||
# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
#
|
||||
# This file format must remain compatible with both
|
||||
# GNU Makefile and Microsoft nmake formats.
|
||||
#
|
||||
|
||||
# Don't put quotes (fail windows build).
|
||||
HOTSPOT_VM_DISTRO=Java HotSpot(TM)
|
||||
COMPANY_NAME=Sun Microsystems, Inc.
|
||||
PRODUCT_NAME=Java(TM) Platform SE
|
@ -205,7 +205,7 @@ SONAMEFLAG = -Xlinker -soname=SONAME
|
||||
SHARED_FLAG = -shared
|
||||
|
||||
# Keep symbols even they are not used
|
||||
AOUT_FLAGS += -export-dynamic
|
||||
AOUT_FLAGS += -Xlinker -export-dynamic
|
||||
|
||||
#------------------------------------------------------------------------
|
||||
# Debug flags
|
||||
|
@ -102,6 +102,10 @@ CFLAGS += $(CFLAGS/NOEX)
|
||||
CFLAGS += $(EXTRA_CFLAGS)
|
||||
LFLAGS += $(EXTRA_CFLAGS)
|
||||
|
||||
# Don't set excutable bit on stack segment
|
||||
# the same could be done by separate execstack command
|
||||
LFLAGS += -Xlinker -z -Xlinker noexecstack
|
||||
|
||||
LIBS += -lm -ldl -lpthread
|
||||
|
||||
# By default, link the *.o into the library, not the executable.
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -56,6 +56,30 @@ else
|
||||
SA_LFLAGS += -mt -xnolib -norunpath
|
||||
endif
|
||||
|
||||
# The libproc Pstack_iter() interface changed in Nevada-B159.
|
||||
# This logic needs to match
|
||||
# agent/src/os/solaris/proc/saproc.cpp: set_has_newer_Pstack_iter():
|
||||
# - skip SunOS 4 or older
|
||||
# - skip Solaris 10 or older
|
||||
# - skip two digit Nevada builds
|
||||
# - skip three digit Nevada builds thru 149
|
||||
# - skip Nevada builds 150-158
|
||||
SOLARIS_11_B159_OR_LATER := \
|
||||
$(shell uname -r -v \
|
||||
| sed -n ' \
|
||||
/^[0-3]\. /b \
|
||||
/^5\.[0-9] /b \
|
||||
/^5\.10 /b \
|
||||
/ snv_[0-9][0-9]$/b \
|
||||
/ snv_[01][0-4][0-9]$/b \
|
||||
/ snv_15[0-8]$/b \
|
||||
s/.*/-DSOLARIS_11_B159_OR_LATER/p \
|
||||
')
|
||||
|
||||
# Uncomment the following to simulate building on Nevada-B159 or later
|
||||
# when actually building on Nevada-B158 or earlier:
|
||||
#SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
|
||||
|
||||
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
|
||||
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
|
||||
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
|
||||
@ -68,6 +92,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
|
||||
-I$(GENERATED) \
|
||||
-I$(BOOT_JAVA_HOME)/include \
|
||||
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
|
||||
$(SOLARIS_11_B159_OR_LATER) \
|
||||
$(SASRCFILES) \
|
||||
$(SA_LFLAGS) \
|
||||
-o $@ \
|
||||
|
@ -100,11 +100,6 @@ JVM_CHECK_SYMBOLS = $(NM) -u -p $(LIBJVM.o) | \
|
||||
|
||||
LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
|
||||
|
||||
# Some interfaces (_lwp_create) changed with LP64 and Solaris 7
|
||||
SOLARIS_7_OR_LATER := \
|
||||
$(shell uname -r | awk -F. '{ if ($$2 >= 7) print "-DSOLARIS_7_OR_LATER"; }')
|
||||
CFLAGS += ${SOLARIS_7_OR_LATER}
|
||||
|
||||
# New architecture options started in SS12 (5.9), we need both styles to build.
|
||||
# The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
|
||||
# Note: default for 32bit sparc is now the same as v8plus, so the
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -125,7 +125,25 @@ VARIANT_TEXT=Kernel
|
||||
# or make/hotspot_distro.
|
||||
!ifndef HOTSPOT_VM_DISTRO
|
||||
!if exists($(WorkSpace)\src\closed)
|
||||
|
||||
# if the build is for JDK6 or earlier version, it should include jdk6_hotspot_distro,
|
||||
# instead of hotspot_distro.
|
||||
JDK6_OR_EARLIER=0
|
||||
!if "$(JDK_MAJOR_VERSION)" != "" && "$(JDK_MINOR_VERSION)" != "" && "$(JDK_MICRO_VERSION)" != ""
|
||||
!if $(JDK_MAJOR_VERSION) == 1 && $(JDK_MINOR_VERSION) < 7
|
||||
JDK6_OR_EARLIER=1
|
||||
!endif
|
||||
!else
|
||||
!if $(JDK_MAJOR_VER) == 1 && $(JDK_MINOR_VER) < 7
|
||||
JDK6_OR_EARLIER=1
|
||||
!endif
|
||||
!endif
|
||||
|
||||
!if $(JDK6_OR_EARLIER) == 1
|
||||
!include $(WorkSpace)\make\jdk6_hotspot_distro
|
||||
!else
|
||||
!include $(WorkSpace)\make\hotspot_distro
|
||||
!endif
|
||||
!else
|
||||
!include $(WorkSpace)\make\openjdk_distro
|
||||
!endif
|
||||
@ -260,7 +278,7 @@ $(variantDir)\local.make: checks
|
||||
@ echo Variant=$(realVariant) >> $@
|
||||
@ echo WorkSpace=$(WorkSpace) >> $@
|
||||
@ echo BootStrapDir=$(BootStrapDir) >> $@
|
||||
@ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@
|
||||
@ if "$(USERNAME)" NEQ "" echo BuildUser=$(USERNAME) >> $@
|
||||
@ echo HS_VER=$(HS_VER) >> $@
|
||||
@ echo HS_DOTVER=$(HS_DOTVER) >> $@
|
||||
@ echo HS_COMPANY=$(COMPANY_NAME) >> $@
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,7 +47,7 @@
|
||||
// Defines Linux-specific default values. The flags are available on all
|
||||
// platforms, but they may have different default values on other platforms.
|
||||
//
|
||||
define_pd_global(bool, UseLargePages, false);
|
||||
define_pd_global(bool, UseLargePages, true);
|
||||
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
|
||||
define_pd_global(bool, UseOSErrorReporting, false);
|
||||
define_pd_global(bool, UseThreadPriorities, true) ;
|
||||
|
@ -2914,16 +2914,21 @@ static void set_coredump_filter(void) {
|
||||
|
||||
static size_t _large_page_size = 0;
|
||||
|
||||
bool os::large_page_init() {
|
||||
void os::large_page_init() {
|
||||
if (!UseLargePages) {
|
||||
UseHugeTLBFS = false;
|
||||
UseSHM = false;
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
|
||||
// Our user has not expressed a preference, so we'll try both.
|
||||
UseHugeTLBFS = UseSHM = true;
|
||||
// If UseLargePages is specified on the command line try both methods,
|
||||
// if it's default, then try only HugeTLBFS.
|
||||
if (FLAG_IS_DEFAULT(UseLargePages)) {
|
||||
UseHugeTLBFS = true;
|
||||
} else {
|
||||
UseHugeTLBFS = UseSHM = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (LargePageSizeInBytes) {
|
||||
@ -2978,7 +2983,6 @@ bool os::large_page_init() {
|
||||
_page_sizes[1] = default_page_size;
|
||||
_page_sizes[2] = 0;
|
||||
}
|
||||
|
||||
UseHugeTLBFS = UseHugeTLBFS &&
|
||||
Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
|
||||
|
||||
@ -2988,12 +2992,6 @@ bool os::large_page_init() {
|
||||
UseLargePages = UseHugeTLBFS || UseSHM;
|
||||
|
||||
set_coredump_filter();
|
||||
|
||||
// Large page support is available on 2.6 or newer kernel, some vendors
|
||||
// (e.g. Redhat) have backported it to their 2.4 based distributions.
|
||||
// We optimistically assume the support is available. If later it turns out
|
||||
// not true, VM will automatically switch to use regular page size.
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifndef SHM_HUGETLB
|
||||
@ -4118,7 +4116,7 @@ jint os::init_2(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
|
||||
os::large_page_init();
|
||||
|
||||
// initialize suspend/resume support - must do this before signal_sets_init()
|
||||
if (SR_initialize() != 0) {
|
||||
|
@ -3336,11 +3336,11 @@ bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool os::large_page_init() {
|
||||
void os::large_page_init() {
|
||||
if (!UseLargePages) {
|
||||
UseISM = false;
|
||||
UseMPSS = false;
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
|
||||
// print a warning if any large page related flag is specified on command line
|
||||
@ -3361,7 +3361,6 @@ bool os::large_page_init() {
|
||||
Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
|
||||
|
||||
UseLargePages = UseISM || UseMPSS;
|
||||
return UseLargePages;
|
||||
}
|
||||
|
||||
bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
|
||||
@ -4992,7 +4991,7 @@ jint os::init_2(void) {
|
||||
#endif
|
||||
}
|
||||
|
||||
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
|
||||
os::large_page_init();
|
||||
|
||||
// Check minimum allowable stack size for thread creation and to initialize
|
||||
// the java system classes, including StackOverflowError - depends on page
|
||||
|
@ -2762,8 +2762,8 @@ static void cleanup_after_large_page_init() {
|
||||
_hToken = NULL;
|
||||
}
|
||||
|
||||
bool os::large_page_init() {
|
||||
if (!UseLargePages) return false;
|
||||
void os::large_page_init() {
|
||||
if (!UseLargePages) return;
|
||||
|
||||
// print a warning if any large page related flag is specified on command line
|
||||
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
|
||||
@ -2808,7 +2808,7 @@ bool os::large_page_init() {
|
||||
}
|
||||
|
||||
cleanup_after_large_page_init();
|
||||
return success;
|
||||
UseLargePages = success;
|
||||
}
|
||||
|
||||
// On win32, one cannot release just a part of reserved memory, it's an
|
||||
@ -3561,7 +3561,7 @@ jint os::init_2(void) {
|
||||
#endif
|
||||
}
|
||||
|
||||
FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
|
||||
os::large_page_init();
|
||||
|
||||
// Setup Windows Exceptions
|
||||
|
||||
|
@ -93,7 +93,7 @@ inline void OrderAccess::release_store_ptr(volatile void* p, void* v)
|
||||
|
||||
inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
|
||||
__asm__ volatile ( "xchgb (%2),%0"
|
||||
: "=r" (v)
|
||||
: "=q" (v)
|
||||
: "0" (v), "r" (p)
|
||||
: "memory");
|
||||
}
|
||||
@ -155,7 +155,7 @@ inline void OrderAccess::store_ptr_fence(void** p, void* v) {
|
||||
// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
|
||||
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
|
||||
__asm__ volatile ( "xchgb (%2),%0"
|
||||
: "=r" (v)
|
||||
: "=q" (v)
|
||||
: "0" (v), "r" (p)
|
||||
: "memory");
|
||||
}
|
||||
|
@ -826,6 +826,14 @@ public:
|
||||
void ConcurrentMark::checkpointRootsInitialPost() {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
// If we force an overflow during remark, the remark operation will
|
||||
// actually abort and we'll restart concurrent marking. If we always
|
||||
// force an oveflow during remark we'll never actually complete the
|
||||
// marking phase. So, we initilize this here, at the start of the
|
||||
// cycle, so that at the remaining overflow number will decrease at
|
||||
// every remark and we'll eventually not need to cause one.
|
||||
force_overflow_stw()->init();
|
||||
|
||||
// For each region note start of marking.
|
||||
NoteStartOfMarkHRClosure startcl;
|
||||
g1h->heap_region_iterate(&startcl);
|
||||
@ -893,27 +901,37 @@ void ConcurrentMark::checkpointRootsInitial() {
|
||||
}
|
||||
|
||||
/*
|
||||
Notice that in the next two methods, we actually leave the STS
|
||||
during the barrier sync and join it immediately afterwards. If we
|
||||
do not do this, this then the following deadlock can occur: one
|
||||
thread could be in the barrier sync code, waiting for the other
|
||||
thread to also sync up, whereas another one could be trying to
|
||||
yield, while also waiting for the other threads to sync up too.
|
||||
|
||||
Because the thread that does the sync barrier has left the STS, it
|
||||
is possible to be suspended for a Full GC or an evacuation pause
|
||||
could occur. This is actually safe, since the entering the sync
|
||||
barrier is one of the last things do_marking_step() does, and it
|
||||
doesn't manipulate any data structures afterwards.
|
||||
*/
|
||||
* Notice that in the next two methods, we actually leave the STS
|
||||
* during the barrier sync and join it immediately afterwards. If we
|
||||
* do not do this, the following deadlock can occur: one thread could
|
||||
* be in the barrier sync code, waiting for the other thread to also
|
||||
* sync up, whereas another one could be trying to yield, while also
|
||||
* waiting for the other threads to sync up too.
|
||||
*
|
||||
* Note, however, that this code is also used during remark and in
|
||||
* this case we should not attempt to leave / enter the STS, otherwise
|
||||
* we'll either hit an asseert (debug / fastdebug) or deadlock
|
||||
* (product). So we should only leave / enter the STS if we are
|
||||
* operating concurrently.
|
||||
*
|
||||
* Because the thread that does the sync barrier has left the STS, it
|
||||
* is possible to be suspended for a Full GC or an evacuation pause
|
||||
* could occur. This is actually safe, since the entering the sync
|
||||
* barrier is one of the last things do_marking_step() does, and it
|
||||
* doesn't manipulate any data structures afterwards.
|
||||
*/
|
||||
|
||||
void ConcurrentMark::enter_first_sync_barrier(int task_num) {
|
||||
if (verbose_low())
|
||||
gclog_or_tty->print_cr("[%d] entering first barrier", task_num);
|
||||
|
||||
ConcurrentGCThread::stsLeave();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsLeave();
|
||||
}
|
||||
_first_overflow_barrier_sync.enter();
|
||||
ConcurrentGCThread::stsJoin();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsJoin();
|
||||
}
|
||||
// at this point everyone should have synced up and not be doing any
|
||||
// more work
|
||||
|
||||
@ -923,7 +941,12 @@ void ConcurrentMark::enter_first_sync_barrier(int task_num) {
|
||||
// let task 0 do this
|
||||
if (task_num == 0) {
|
||||
// task 0 is responsible for clearing the global data structures
|
||||
clear_marking_state();
|
||||
// We should be here because of an overflow. During STW we should
|
||||
// not clear the overflow flag since we rely on it being true when
|
||||
// we exit this method to abort the pause and restart concurent
|
||||
// marking.
|
||||
clear_marking_state(concurrent() /* clear_overflow */);
|
||||
force_overflow()->update();
|
||||
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
@ -940,15 +963,45 @@ void ConcurrentMark::enter_second_sync_barrier(int task_num) {
|
||||
if (verbose_low())
|
||||
gclog_or_tty->print_cr("[%d] entering second barrier", task_num);
|
||||
|
||||
ConcurrentGCThread::stsLeave();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsLeave();
|
||||
}
|
||||
_second_overflow_barrier_sync.enter();
|
||||
ConcurrentGCThread::stsJoin();
|
||||
if (concurrent()) {
|
||||
ConcurrentGCThread::stsJoin();
|
||||
}
|
||||
// at this point everything should be re-initialised and ready to go
|
||||
|
||||
if (verbose_low())
|
||||
gclog_or_tty->print_cr("[%d] leaving second barrier", task_num);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void ForceOverflowSettings::init() {
|
||||
_num_remaining = G1ConcMarkForceOverflow;
|
||||
_force = false;
|
||||
update();
|
||||
}
|
||||
|
||||
void ForceOverflowSettings::update() {
|
||||
if (_num_remaining > 0) {
|
||||
_num_remaining -= 1;
|
||||
_force = true;
|
||||
} else {
|
||||
_force = false;
|
||||
}
|
||||
}
|
||||
|
||||
bool ForceOverflowSettings::should_force() {
|
||||
if (_force) {
|
||||
_force = false;
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
void ConcurrentMark::grayRoot(oop p) {
|
||||
HeapWord* addr = (HeapWord*) p;
|
||||
// We can't really check against _heap_start and _heap_end, since it
|
||||
@ -1117,6 +1170,7 @@ void ConcurrentMark::markFromRoots() {
|
||||
_restart_for_overflow = false;
|
||||
|
||||
size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
|
||||
force_overflow_conc()->init();
|
||||
set_phase(active_workers, true /* concurrent */);
|
||||
|
||||
CMConcurrentMarkingTask markingTask(this, cmThread());
|
||||
@ -1845,7 +1899,7 @@ void ConcurrentMark::completeCleanup() {
|
||||
while (!_cleanup_list.is_empty()) {
|
||||
HeapRegion* hr = _cleanup_list.remove_head();
|
||||
assert(hr != NULL, "the list was not empty");
|
||||
hr->rem_set()->clear();
|
||||
hr->par_clear();
|
||||
tmp_free_list.add_as_tail(hr);
|
||||
|
||||
// Instead of adding one region at a time to the secondary_free_list,
|
||||
@ -2703,12 +2757,16 @@ void ConcurrentMark::oops_do(OopClosure* cl) {
|
||||
|
||||
}
|
||||
|
||||
void ConcurrentMark::clear_marking_state() {
|
||||
void ConcurrentMark::clear_marking_state(bool clear_overflow) {
|
||||
_markStack.setEmpty();
|
||||
_markStack.clear_overflow();
|
||||
_regionStack.setEmpty();
|
||||
_regionStack.clear_overflow();
|
||||
clear_has_overflown();
|
||||
if (clear_overflow) {
|
||||
clear_has_overflown();
|
||||
} else {
|
||||
assert(has_overflown(), "pre-condition");
|
||||
}
|
||||
_finger = _heap_start;
|
||||
|
||||
for (int i = 0; i < (int)_max_task_num; ++i) {
|
||||
@ -4279,6 +4337,15 @@ void CMTask::do_marking_step(double time_target_ms,
|
||||
}
|
||||
}
|
||||
|
||||
// If we are about to wrap up and go into termination, check if we
|
||||
// should raise the overflow flag.
|
||||
if (do_termination && !has_aborted()) {
|
||||
if (_cm->force_overflow()->should_force()) {
|
||||
_cm->set_has_overflown();
|
||||
regular_clock_call();
|
||||
}
|
||||
}
|
||||
|
||||
// We still haven't aborted. Now, let's try to get into the
|
||||
// termination protocol.
|
||||
if (do_termination && !has_aborted()) {
|
||||
|
@ -316,6 +316,19 @@ public:
|
||||
void setEmpty() { _index = 0; clear_overflow(); }
|
||||
};
|
||||
|
||||
class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
|
||||
private:
|
||||
#ifndef PRODUCT
|
||||
uintx _num_remaining;
|
||||
bool _force;
|
||||
#endif // !defined(PRODUCT)
|
||||
|
||||
public:
|
||||
void init() PRODUCT_RETURN;
|
||||
void update() PRODUCT_RETURN;
|
||||
bool should_force() PRODUCT_RETURN_( return false; );
|
||||
};
|
||||
|
||||
// this will enable a variety of different statistics per GC task
|
||||
#define _MARKING_STATS_ 0
|
||||
// this will enable the higher verbose levels
|
||||
@ -462,6 +475,9 @@ protected:
|
||||
|
||||
WorkGang* _parallel_workers;
|
||||
|
||||
ForceOverflowSettings _force_overflow_conc;
|
||||
ForceOverflowSettings _force_overflow_stw;
|
||||
|
||||
void weakRefsWork(bool clear_all_soft_refs);
|
||||
|
||||
void swapMarkBitMaps();
|
||||
@ -470,7 +486,7 @@ protected:
|
||||
// task local ones; should be called during initial mark.
|
||||
void reset();
|
||||
// It resets all the marking data structures.
|
||||
void clear_marking_state();
|
||||
void clear_marking_state(bool clear_overflow = true);
|
||||
|
||||
// It should be called to indicate which phase we're in (concurrent
|
||||
// mark or remark) and how many threads are currently active.
|
||||
@ -547,6 +563,22 @@ protected:
|
||||
void enter_first_sync_barrier(int task_num);
|
||||
void enter_second_sync_barrier(int task_num);
|
||||
|
||||
ForceOverflowSettings* force_overflow_conc() {
|
||||
return &_force_overflow_conc;
|
||||
}
|
||||
|
||||
ForceOverflowSettings* force_overflow_stw() {
|
||||
return &_force_overflow_stw;
|
||||
}
|
||||
|
||||
ForceOverflowSettings* force_overflow() {
|
||||
if (concurrent()) {
|
||||
return force_overflow_conc();
|
||||
} else {
|
||||
return force_overflow_stw();
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
// Manipulation of the global mark stack.
|
||||
// Notice that the first mark_stack_push is CAS-based, whereas the
|
||||
|
@ -3975,6 +3975,9 @@ void G1CollectedHeap::drain_evac_failure_scan_stack() {
|
||||
oop
|
||||
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
|
||||
oop old) {
|
||||
assert(obj_in_cs(old),
|
||||
err_msg("obj: "PTR_FORMAT" should still be in the CSet",
|
||||
(HeapWord*) old));
|
||||
markOop m = old->mark();
|
||||
oop forward_ptr = old->forward_to_atomic(old);
|
||||
if (forward_ptr == NULL) {
|
||||
@ -3997,7 +4000,13 @@ G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
|
||||
}
|
||||
return old;
|
||||
} else {
|
||||
// Someone else had a place to copy it.
|
||||
// Forward-to-self failed. Either someone else managed to allocate
|
||||
// space for this object (old != forward_ptr) or they beat us in
|
||||
// self-forwarding it (old == forward_ptr).
|
||||
assert(old == forward_ptr || !obj_in_cs(forward_ptr),
|
||||
err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
|
||||
"should not be in the CSet",
|
||||
(HeapWord*) old, (HeapWord*) forward_ptr));
|
||||
return forward_ptr;
|
||||
}
|
||||
}
|
||||
@ -4308,11 +4317,10 @@ template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop(heap_oop);
|
||||
assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
|
||||
"shouldn't still be in the CSet if evacuation didn't fail.");
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_g1->is_in_g1_reserved(addr))
|
||||
if (_g1->is_in_g1_reserved(addr)) {
|
||||
_cm->grayRoot(oop(addr));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4961,36 +4969,45 @@ public:
|
||||
|
||||
#ifndef PRODUCT
|
||||
class G1VerifyCardTableCleanup: public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
CardTableModRefBS* _ct_bs;
|
||||
public:
|
||||
G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
|
||||
: _ct_bs(ct_bs) { }
|
||||
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
|
||||
: _g1h(g1h), _ct_bs(ct_bs) { }
|
||||
virtual bool doHeapRegion(HeapRegion* r) {
|
||||
MemRegion mr(r->bottom(), r->end());
|
||||
if (r->is_survivor()) {
|
||||
_ct_bs->verify_dirty_region(mr);
|
||||
_g1h->verify_dirty_region(r);
|
||||
} else {
|
||||
_ct_bs->verify_clean_region(mr);
|
||||
_g1h->verify_not_dirty_region(r);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
|
||||
// All of the region should be clean.
|
||||
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
|
||||
MemRegion mr(hr->bottom(), hr->end());
|
||||
ct_bs->verify_not_dirty_region(mr);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
|
||||
// We cannot guarantee that [bottom(),end()] is dirty. Threads
|
||||
// dirty allocated blocks as they allocate them. The thread that
|
||||
// retires each region and replaces it with a new one will do a
|
||||
// maximal allocation to fill in [pre_dummy_top(),end()] but will
|
||||
// not dirty that area (one less thing to have to do while holding
|
||||
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
|
||||
// is dirty.
|
||||
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
|
||||
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
|
||||
ct_bs->verify_dirty_region(mr);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
|
||||
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
|
||||
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
|
||||
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
|
||||
// We cannot guarantee that [bottom(),end()] is dirty. Threads
|
||||
// dirty allocated blocks as they allocate them. The thread that
|
||||
// retires each region and replaces it with a new one will do a
|
||||
// maximal allocation to fill in [pre_dummy_top(),end()] but will
|
||||
// not dirty that area (one less thing to have to do while holding
|
||||
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
|
||||
// is dirty. Also note that verify_dirty_region() requires
|
||||
// mr.start() and mr.end() to be card aligned and pre_dummy_top()
|
||||
// is not guaranteed to be.
|
||||
MemRegion mr(hr->bottom(),
|
||||
ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
|
||||
ct_bs->verify_dirty_region(mr);
|
||||
verify_dirty_region(hr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -5033,7 +5050,7 @@ void G1CollectedHeap::cleanUpCardTable() {
|
||||
g1_policy()->record_clear_ct_time( elapsed * 1000.0);
|
||||
#ifndef PRODUCT
|
||||
if (G1VerifyCTCleanup || VerifyAfterGC) {
|
||||
G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
|
||||
G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
|
||||
heap_region_iterate(&cleanup_verifier);
|
||||
}
|
||||
#endif
|
||||
|
@ -970,6 +970,8 @@ public:
|
||||
// The number of regions available for "regular" expansion.
|
||||
size_t expansion_regions() { return _expansion_regions; }
|
||||
|
||||
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
|
||||
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
|
||||
void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
|
||||
void verify_dirty_young_regions() PRODUCT_RETURN;
|
||||
|
||||
|
@ -157,7 +157,6 @@ public:
|
||||
void set_try_claimed() { _try_claimed = true; }
|
||||
|
||||
void scanCard(size_t index, HeapRegion *r) {
|
||||
_cards_done++;
|
||||
DirtyCardToOopClosure* cl =
|
||||
r->new_dcto_closure(_oc,
|
||||
CardTableModRefBS::Precise,
|
||||
@ -168,17 +167,14 @@ public:
|
||||
HeapWord* card_start = _bot_shared->address_for_index(index);
|
||||
HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
|
||||
Space *sp = SharedHeap::heap()->space_containing(card_start);
|
||||
MemRegion sm_region;
|
||||
if (ParallelGCThreads > 0) {
|
||||
// first find the used area
|
||||
sm_region = sp->used_region_at_save_marks();
|
||||
} else {
|
||||
// The closure is not idempotent. We shouldn't look at objects
|
||||
// allocated during the GC.
|
||||
sm_region = sp->used_region_at_save_marks();
|
||||
}
|
||||
MemRegion sm_region = sp->used_region_at_save_marks();
|
||||
MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
|
||||
if (!mr.is_empty()) {
|
||||
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
|
||||
// We make the card as "claimed" lazily (so races are possible
|
||||
// but they're benign), which reduces the number of duplicate
|
||||
// scans (the rsets of the regions in the cset can intersect).
|
||||
_ct_bs->set_card_claimed(index);
|
||||
_cards_done++;
|
||||
cl->do_MemRegion(mr);
|
||||
}
|
||||
}
|
||||
@ -199,6 +195,9 @@ public:
|
||||
HeapRegionRemSet* hrrs = r->rem_set();
|
||||
if (hrrs->iter_is_complete()) return false; // All done.
|
||||
if (!_try_claimed && !hrrs->claim_iter()) return false;
|
||||
// If we ever free the collection set concurrently, we should also
|
||||
// clear the card table concurrently therefore we won't need to
|
||||
// add regions of the collection set to the dirty cards region.
|
||||
_g1h->push_dirty_cards_region(r);
|
||||
// If we didn't return above, then
|
||||
// _try_claimed || r->claim_iter()
|
||||
@ -230,15 +229,10 @@ public:
|
||||
_g1h->push_dirty_cards_region(card_region);
|
||||
}
|
||||
|
||||
// If the card is dirty, then we will scan it during updateRS.
|
||||
if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
|
||||
// We make the card as "claimed" lazily (so races are possible but they're benign),
|
||||
// which reduces the number of duplicate scans (the rsets of the regions in the cset
|
||||
// can intersect).
|
||||
if (!_ct_bs->is_card_claimed(card_index)) {
|
||||
_ct_bs->set_card_claimed(card_index);
|
||||
scanCard(card_index, card_region);
|
||||
}
|
||||
// If the card is dirty, then we will scan it during updateRS.
|
||||
if (!card_region->in_collection_set() &&
|
||||
!_ct_bs->is_card_dirty(card_index)) {
|
||||
scanCard(card_index, card_region);
|
||||
}
|
||||
}
|
||||
if (!_try_claimed) {
|
||||
@ -246,8 +240,6 @@ public:
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// Set all cards back to clean.
|
||||
void cleanup() {_g1h->cleanUpCardTable();}
|
||||
size_t cards_done() { return _cards_done;}
|
||||
size_t cards_looked_up() { return _cards;}
|
||||
};
|
||||
@ -566,8 +558,9 @@ public:
|
||||
update_rs_cl.set_region(r);
|
||||
HeapWord* stop_point =
|
||||
r->oops_on_card_seq_iterate_careful(scanRegion,
|
||||
&filter_then_update_rs_cset_oop_cl,
|
||||
false /* filter_young */);
|
||||
&filter_then_update_rs_cset_oop_cl,
|
||||
false /* filter_young */,
|
||||
NULL /* card_ptr */);
|
||||
|
||||
// Since this is performed in the event of an evacuation failure, we
|
||||
// we shouldn't see a non-null stop point
|
||||
@ -735,12 +728,6 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
(OopClosure*)&mux :
|
||||
(OopClosure*)&update_rs_oop_cl));
|
||||
|
||||
// Undirty the card.
|
||||
*card_ptr = CardTableModRefBS::clean_card_val();
|
||||
// We must complete this write before we do any of the reads below.
|
||||
OrderAccess::storeload();
|
||||
// And process it, being careful of unallocated portions of TLAB's.
|
||||
|
||||
// The region for the current card may be a young region. The
|
||||
// current card may have been a card that was evicted from the
|
||||
// card cache. When the card was inserted into the cache, we had
|
||||
@ -749,7 +736,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
// and tagged as young.
|
||||
//
|
||||
// We wish to filter out cards for such a region but the current
|
||||
// thread, if we're running conucrrently, may "see" the young type
|
||||
// thread, if we're running concurrently, may "see" the young type
|
||||
// change at any time (so an earlier "is_young" check may pass or
|
||||
// fail arbitrarily). We tell the iteration code to perform this
|
||||
// filtering when it has been determined that there has been an actual
|
||||
@ -759,7 +746,8 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
HeapWord* stop_point =
|
||||
r->oops_on_card_seq_iterate_careful(dirtyRegion,
|
||||
&filter_then_update_rs_oop_cl,
|
||||
filter_young);
|
||||
filter_young,
|
||||
card_ptr);
|
||||
|
||||
// If stop_point is non-null, then we encountered an unallocated region
|
||||
// (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
|
||||
|
@ -311,7 +311,11 @@
|
||||
\
|
||||
develop(bool, G1ExitOnExpansionFailure, false, \
|
||||
"Raise a fatal VM exit out of memory failure in the event " \
|
||||
" that heap expansion fails due to running out of swap.")
|
||||
" that heap expansion fails due to running out of swap.") \
|
||||
\
|
||||
develop(uintx, G1ConcMarkForceOverflow, 0, \
|
||||
"The number of times we'll force an overflow during " \
|
||||
"concurrent marking")
|
||||
|
||||
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
|
||||
|
||||
|
@ -376,6 +376,17 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
|
||||
if (clear_space) clear(SpaceDecorator::Mangle);
|
||||
}
|
||||
|
||||
void HeapRegion::par_clear() {
|
||||
assert(used() == 0, "the region should have been already cleared");
|
||||
assert(capacity() == (size_t) HeapRegion::GrainBytes,
|
||||
"should be back to normal");
|
||||
HeapRegionRemSet* hrrs = rem_set();
|
||||
hrrs->clear();
|
||||
CardTableModRefBS* ct_bs =
|
||||
(CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
|
||||
ct_bs->clear(MemRegion(bottom(), end()));
|
||||
}
|
||||
|
||||
// <PREDICTION>
|
||||
void HeapRegion::calc_gc_efficiency() {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
@ -600,7 +611,15 @@ HeapWord*
|
||||
HeapRegion::
|
||||
oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
FilterOutOfRegionClosure* cl,
|
||||
bool filter_young) {
|
||||
bool filter_young,
|
||||
jbyte* card_ptr) {
|
||||
// Currently, we should only have to clean the card if filter_young
|
||||
// is true and vice versa.
|
||||
if (filter_young) {
|
||||
assert(card_ptr != NULL, "pre-condition");
|
||||
} else {
|
||||
assert(card_ptr == NULL, "pre-condition");
|
||||
}
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
// If we're within a stop-world GC, then we might look at a card in a
|
||||
@ -626,6 +645,15 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
|
||||
assert(!is_young(), "check value of filter_young");
|
||||
|
||||
// We can only clean the card here, after we make the decision that
|
||||
// the card is not young. And we only clean the card if we have been
|
||||
// asked to (i.e., card_ptr != NULL).
|
||||
if (card_ptr != NULL) {
|
||||
*card_ptr = CardTableModRefBS::clean_card_val();
|
||||
// We must complete this write before we do any of the reads below.
|
||||
OrderAccess::storeload();
|
||||
}
|
||||
|
||||
// We used to use "block_start_careful" here. But we're actually happy
|
||||
// to update the BOT while we do this...
|
||||
HeapWord* cur = block_start(mr.start());
|
||||
|
@ -584,6 +584,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
|
||||
// Reset HR stuff to default values.
|
||||
void hr_clear(bool par, bool clear_space);
|
||||
void par_clear();
|
||||
|
||||
void initialize(MemRegion mr, bool clear_space, bool mangle_space);
|
||||
|
||||
@ -802,12 +803,16 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
HeapWord*
|
||||
object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
|
||||
|
||||
// In this version - if filter_young is true and the region
|
||||
// is a young region then we skip the iteration.
|
||||
// filter_young: if true and the region is a young region then we
|
||||
// skip the iteration.
|
||||
// card_ptr: if not NULL, and we decide that the card is not young
|
||||
// and we iterate over it, we'll clean the card before we start the
|
||||
// iteration.
|
||||
HeapWord*
|
||||
oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
FilterOutOfRegionClosure* cl,
|
||||
bool filter_young);
|
||||
bool filter_young,
|
||||
jbyte* card_ptr);
|
||||
|
||||
// A version of block start that is guaranteed to find *some* block
|
||||
// boundary at or before "p", but does not object iteration, and may
|
||||
|
@ -224,6 +224,12 @@ void PSOldGen::expand(size_t bytes) {
|
||||
const size_t alignment = virtual_space()->alignment();
|
||||
size_t aligned_bytes = align_size_up(bytes, alignment);
|
||||
size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
|
||||
|
||||
if (UseNUMA) {
|
||||
// With NUMA we use round-robin page allocation for the old gen. Expand by at least
|
||||
// providing a page per lgroup. Alignment is larger or equal to the page size.
|
||||
aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
|
||||
}
|
||||
if (aligned_bytes == 0){
|
||||
// The alignment caused the number of bytes to wrap. An expand_by(0) will
|
||||
// return true with the implication that and expansion was done when it
|
||||
|
@ -327,6 +327,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
|
||||
|
||||
// 1. check if klass is not interface
|
||||
if (resolved_klass->is_interface()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected", Klass::cast(resolved_klass())->external_name());
|
||||
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
|
||||
@ -413,6 +414,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
|
||||
|
||||
// check if klass is interface
|
||||
if (!resolved_klass->is_interface()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Found class %s, but interface was expected", Klass::cast(resolved_klass())->external_name());
|
||||
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
|
||||
@ -534,6 +536,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo
|
||||
|
||||
// check for errors
|
||||
if (is_static != fd.is_static()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char msg[200];
|
||||
jio_snprintf(msg, sizeof(msg), "Expected %s field %s.%s", is_static ? "static" : "non-static", Klass::cast(resolved_klass())->external_name(), fd.name()->as_C_string());
|
||||
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), msg);
|
||||
@ -631,6 +634,7 @@ void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method,
|
||||
|
||||
// check if static
|
||||
if (!resolved_method->is_static()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Expected static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
|
||||
resolved_method->name(),
|
||||
@ -671,6 +675,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
|
||||
|
||||
// check if not static
|
||||
if (resolved_method->is_static()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf),
|
||||
"Expecting non-static method %s",
|
||||
@ -717,6 +722,7 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
|
||||
|
||||
// check if not static
|
||||
if (sel_method->is_static()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
|
||||
resolved_method->name(),
|
||||
@ -757,6 +763,7 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
|
||||
|
||||
// check if not static
|
||||
if (resolved_method->is_static()) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Expecting non-static method %s", methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
|
||||
resolved_method->name(),
|
||||
@ -873,6 +880,7 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
|
||||
|
||||
// check if receiver klass implements the resolved interface
|
||||
if (!recv_klass->is_subtype_of(resolved_klass())) {
|
||||
ResourceMark rm(THREAD);
|
||||
char buf[200];
|
||||
jio_snprintf(buf, sizeof(buf), "Class %s does not implement the requested interface %s",
|
||||
(Klass::cast(recv_klass()))->external_name(),
|
||||
|
@ -44,6 +44,14 @@ void* CHeapObj::operator new(size_t size){
|
||||
return (void *) AllocateHeap(size, "CHeapObj-new");
|
||||
}
|
||||
|
||||
void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
|
||||
char* p = (char*) os::malloc(size);
|
||||
#ifdef ASSERT
|
||||
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
|
||||
#endif
|
||||
return p;
|
||||
}
|
||||
|
||||
void CHeapObj::operator delete(void* p){
|
||||
FreeHeap(p);
|
||||
}
|
||||
|
@ -34,6 +34,8 @@
|
||||
#include "opto/c2_globals.hpp"
|
||||
#endif
|
||||
|
||||
#include <new>
|
||||
|
||||
#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
|
||||
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
|
||||
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
|
||||
@ -99,6 +101,7 @@ class AllocatedObj {
|
||||
class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
|
||||
public:
|
||||
void* operator new(size_t size);
|
||||
void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
|
||||
void operator delete(void* p);
|
||||
void* new_array(size_t size);
|
||||
};
|
||||
|
@ -652,43 +652,37 @@ void CardTableModRefBS::verify() {
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
class GuaranteeNotModClosure: public MemRegionClosure {
|
||||
CardTableModRefBS* _ct;
|
||||
public:
|
||||
GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
|
||||
void do_MemRegion(MemRegion mr) {
|
||||
jbyte* entry = _ct->byte_for(mr.start());
|
||||
guarantee(*entry != CardTableModRefBS::clean_card,
|
||||
"Dirty card in region that should be clean");
|
||||
void CardTableModRefBS::verify_region(MemRegion mr,
|
||||
jbyte val, bool val_equals) {
|
||||
jbyte* start = byte_for(mr.start());
|
||||
jbyte* end = byte_for(mr.last());
|
||||
bool failures = false;
|
||||
for (jbyte* curr = start; curr <= end; ++curr) {
|
||||
jbyte curr_val = *curr;
|
||||
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
|
||||
if (failed) {
|
||||
if (!failures) {
|
||||
tty->cr();
|
||||
tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]");
|
||||
tty->print_cr("== %sexpecting value: %d",
|
||||
(val_equals) ? "" : "not ", val);
|
||||
failures = true;
|
||||
}
|
||||
tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
|
||||
"val: %d", curr, addr_for(curr),
|
||||
(HeapWord*) (((size_t) addr_for(curr)) + card_size),
|
||||
(int) curr_val);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void CardTableModRefBS::verify_clean_region(MemRegion mr) {
|
||||
GuaranteeNotModClosure blk(this);
|
||||
non_clean_card_iterate_serial(mr, &blk);
|
||||
guarantee(!failures, "there should not have been any failures");
|
||||
}
|
||||
|
||||
// To verify a MemRegion is entirely dirty this closure is passed to
|
||||
// dirty_card_iterate. If the region is dirty do_MemRegion will be
|
||||
// invoked only once with a MemRegion equal to the one being
|
||||
// verified.
|
||||
class GuaranteeDirtyClosure: public MemRegionClosure {
|
||||
CardTableModRefBS* _ct;
|
||||
MemRegion _mr;
|
||||
bool _result;
|
||||
public:
|
||||
GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
|
||||
: _ct(ct), _mr(mr), _result(false) {}
|
||||
void do_MemRegion(MemRegion mr) {
|
||||
_result = _mr.equals(mr);
|
||||
}
|
||||
bool result() const { return _result; }
|
||||
};
|
||||
void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
|
||||
verify_region(mr, dirty_card, false /* val_equals */);
|
||||
}
|
||||
|
||||
void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
|
||||
GuaranteeDirtyClosure blk(this, mr);
|
||||
dirty_card_iterate(mr, &blk);
|
||||
guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
|
||||
verify_region(mr, dirty_card, true /* val_equals */);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -475,7 +475,10 @@ public:
void verify();
void verify_guard();

void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
// val_equals -> it will check that all cards covered by mr equal val
// !val_equals -> it will check that all cards covered by mr do not equal val
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

static size_t par_chunk_heapword_alignment() {
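A short sketch of how these debug-only checks might be used at a call site; in the usual HotSpot convention PRODUCT_RETURN gives the functions an empty body in product builds, so the calls compile to nothing there (the helper names and the bs variable are illustrative, not from this change):

#include "memory/cardTableModRefBS.hpp"

// Debug-only assertion that every card covering 'mr' is already dirty.
static void assert_cards_dirty(CardTableModRefBS* bs, MemRegion mr) {
  // In non-product builds this walks byte_for(mr.start())..byte_for(mr.last())
  // via verify_region() and calls guarantee() on any mismatch; in product
  // builds it is a no-op.
  bs->verify_dirty_region(mr);
}

// Debug-only assertion that no card covering 'mr' is dirty.
static void assert_cards_not_dirty(CardTableModRefBS* bs, MemRegion mr) {
  bs->verify_not_dirty_region(mr);
}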
@ -265,8 +265,6 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

always_do_update_barrier = UseConcMarkSweepGC;
BlockOffsetArrayUseUnallocatedBlock =
BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;

// Check validity of heap flags
assert(OldSize % min_alignment() == 0, "old space alignment");
@ -100,12 +100,6 @@ public:
// Pass along the argument to the superclass.
ModRefBarrierSet(int max_covered_regions) :
BarrierSet(max_covered_regions) {}

#ifndef PRODUCT
// Verifies that the given region contains no modified references.
virtual void verify_clean_region(MemRegion mr) = 0;
#endif

};

#endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
@ -280,10 +280,8 @@
<!ELEMENT externallink (#PCDATA|jvmti|code|i|b|tm)*>
<!ATTLIST externallink id CDATA #REQUIRED>

<!ELEMENT vmspeclink EMPTY>
<!ATTLIST vmspeclink id CDATA #IMPLIED>
<!ATTLIST vmspeclink name CDATA #IMPLIED>
<!ATTLIST vmspeclink preposition CDATA #IMPLIED>
<!ELEMENT vmspec EMPTY>
<!ATTLIST vmspec chapter CDATA #IMPLIED>

<!ELEMENT internallink (#PCDATA|jvmti|code|i|b)*>
<!ATTLIST internallink id CDATA #REQUIRED>
@ -2285,9 +2283,8 @@ jvmtiEnv *jvmti;
Stack frames are referenced by depth.
The frame at depth zero is the current frame.
<p/>
Stack frames are as described in the
<vmspeclink id="Overview.doc.html#17257"
name="Frames section"/>.
Stack frames are as described in
<vmspec chapter="3.6"/>,
That is, they correspond to method
invocations (including native methods) but do not correspond to platform native or
VM internal frames.
@ -2627,7 +2624,7 @@ err = (*jvmti)->Deallocate(jvmti, stack_info);
<param id="use_java_stack">
<jboolean/>
<description>
Return the stack showing the <vmspeclink/>
Return the stack showing <vmspec/>
model of the stack;
otherwise, show the internal representation of the stack with
inlined and optimized methods missing. If the virtual machine
@ -2707,7 +2704,7 @@ err = (*jvmti)->Deallocate(jvmti, stack_info);
When the thread is resumed, the execution
state of the thread is reset to the state
immediately before the called method was invoked.
That is (using the <vmspeclink/> terminology):
That is (using <vmspec/> terminology):
<ul>
<li>the current frame is discarded as the previous frame becomes the current one</li>
<li>the operand stack is restored--the argument values are added back
@ -2868,9 +2865,8 @@ err = (*jvmti)->Deallocate(jvmti, stack_info);
to return at any point during its execution.
The method which will return early is referred to as the <i>called method</i>.
The called method is the current method
(as defined by the
<vmspeclink id="Overview.doc.html#17257"
name="Frames section"/>)
(as defined by
<vmspec chapter="3.6"/>)
for the specified thread at
the time the function is called.
<p/>
@ -3576,10 +3572,8 @@ class C2 extends C1 implements I2 {
<field id="index">
<jint/>
<description>
The index into the constant pool of the class. See the
<vmspeclink id="ClassFile.doc.html#20080"
name="Constant Pool section"/>
description.
The index into the constant pool of the class. See the description in
<vmspec chapter="4.4"/>.
</description>
</field>
</typedef>
@ -5006,9 +5000,8 @@ class C2 extends C1 implements I2 {
For references of this kind the <code>referrer_index</code>
parameter to the <internallink id="jvmtiObjectReferenceCallback">
jvmtiObjectReferenceCallback</internallink> is the index into
constant pool table of the class, starting at 1. See the
<vmspeclink id="ClassFile.doc.html#20080"
name="Constant Pool section"/>
constant pool table of the class, starting at 1. See
<vmspec chapter="4.4"/>.
</constant>
</constants>

@ -6441,9 +6434,7 @@ class C2 extends C1 implements I2 {
been recorded as an initiating loader. Each
class in the returned array was created by this class loader,
either by defining it directly or by delegation to another class loader.
See the
<vmspeclink id="ConstantPool.doc.html#72007"
name="Creation and Loading section"/>.
See <vmspec chapter="5.3"/>.
<p/>
For JDK version 1.1 implementations that don't
recognize the distinction between initiating and defining class loaders,
@ -6626,9 +6617,7 @@ class C2 extends C1 implements I2 {
For the class indicated by <code>klass</code>, return the access
flags
via <code>modifiers_ptr</code>.
Access flags are defined in the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
Access flags are defined in <vmspec chapter="4"/>.
<p/>
If the class is an array class, then its public, private, and protected
modifiers are the same as those of its component type. For arrays of
@ -6794,9 +6783,8 @@ class C2 extends C1 implements I2 {
<description>
For the class indicated by <code>klass</code>,
return the minor and major version numbers,
as defined in the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
as defined in
<vmspec chapter="4"/>.
</description>
<origin>new</origin>
<capabilities>
@ -6839,10 +6827,8 @@ class C2 extends C1 implements I2 {
<description>
For the class indicated by <code>klass</code>,
return the raw bytes of the constant pool in the format of the
<code>constant_pool</code> item of the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format"
preposition="in"/>.
<code>constant_pool</code> item of
<vmspec chapter="4"/>.
The format of the constant pool may differ between versions
of the Class File Format, so, the
<functionlink id="GetClassVersionNumbers">minor and major
@ -7286,9 +7272,7 @@ class C2 extends C1 implements I2 {
<field id="class_bytes">
<inbuf incount="class_byte_count"><uchar/></inbuf>
<description>
Bytes defining class (in the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format"/>)
Bytes defining class (in <vmspec chapter="4"/>)
</description>
</field>
</typedef>
@ -7611,10 +7595,8 @@ class C2 extends C1 implements I2 {
<paramlink id="signature_ptr"/>.
<p/>
Field signatures are defined in the JNI Specification and
are referred to as
<vmspeclink id="ClassFile.doc.html#14152"
name="field descriptors"
preposition="in"/>.
are referred to as <code>field descriptors</code> in
<vmspec chapter="4.3.2"/>.
</description>
<origin>jvmdiClone</origin>
<capabilities>
@ -7709,9 +7691,7 @@ class C2 extends C1 implements I2 {
<description>
For the field indicated by <code>klass</code> and <code>field</code>
return the access flags via <code>modifiers_ptr</code>.
Access flags are defined in the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
Access flags are defined in <vmspec chapter="4"/>.
</description>
<origin>jvmdi</origin>
<capabilities>
@ -7810,10 +7790,9 @@ class C2 extends C1 implements I2 {
return the method name via <code>name_ptr</code> and method signature via
<code>signature_ptr</code>.
<p/>
Method signatures are defined in the JNI Specification and are referred to as
<vmspeclink id="ClassFile.doc.html#7035"
name="method descriptors"
preposition="in"/>.
Method signatures are defined in the JNI Specification and are
referred to as <code>method descriptors</code> in
<vmspec chapter="4.3.3"/>.
Note this is different
than method signatures as defined in the <i>Java Language Specification</i>.
</description>
@ -7902,9 +7881,7 @@ class C2 extends C1 implements I2 {
<description>
For the method indicated by <code>method</code>,
return the access flags via <code>modifiers_ptr</code>.
Access flags are defined in the
<vmspeclink id="ClassFile.doc.html"
name="Class File Format chapter"/>.
Access flags are defined in <vmspec chapter="4"/>.
</description>
<origin>jvmdi</origin>
<capabilities>
@ -7941,9 +7918,7 @@ class C2 extends C1 implements I2 {
including the local variables used to pass parameters to the
method on its invocation.
<p/>
See <code>max_locals</code> in the
<vmspeclink id="ClassFile.doc.html#1546"
name="Code Attribute section"/>.
See <code>max_locals</code> in <vmspec chapter="4.7.3"/>.
</description>
<origin>jvmdi</origin>
<capabilities>
@ -8150,8 +8125,7 @@ class C2 extends C1 implements I2 {
The local variable's type signature, encoded as a
<internallink id="mUTF">modified UTF-8</internallink> string.
The signature format is the same as that defined in
<vmspeclink id="ClassFile.doc.html#14152"
name="Field Descriptors section"/>
<vmspec chapter="4.3.2"/>.
</description>
</field>
<field id="generic_signature">
@ -10460,10 +10434,7 @@ myInit() {
<synopsis>Add To Bootstrap Class Loader Search</synopsis>
<description>
This function can be used to cause instrumentation classes to be defined by the
bootstrap class loader. See
<vmspeclink id="ConstantPool.doc.html#79383"
name="Loading Using the Bootstrap Class Loader"
preposition="in"/>.
bootstrap class loader. See <vmspec chapter="5.3.1"/>.
After the bootstrap
class loader unsuccessfully searches for a class, the specified platform-dependent
search path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in
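The AddToBootstrapClassLoaderSearch description above is easiest to read next to a call site. A minimal agent sketch, assuming the agent options string carries the path of the instrumentation JAR (the error handling is illustrative, not prescribed by the specification):

#include <jvmti.h>

extern "C" JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_1) != JNI_OK) {
    return JNI_ERR;
  }
  // 'options' is assumed to be the platform path of the JAR whose classes
  // should become visible to the bootstrap class loader.
  jvmtiError err = jvmti->AddToBootstrapClassLoaderSearch(options);
  return (err == JVMTI_ERROR_NONE) ? JNI_OK : JNI_ERR;
}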
@ -10480,7 +10451,7 @@ myInit() {
contain any classes or resources other than those to be defined by the bootstrap
class loader for the purposes of instrumentation.
<p/>
The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
<vmspec/> specifies that a subsequent attempt to resolve a symbolic
reference that the Java virtual machine has previously unsuccessfully attempted
to resolve always fails with the same error that was thrown as a result of the
initial resolution attempt. Consequently, if the JAR file contains an entry
@ -10512,10 +10483,7 @@ myInit() {
<synopsis>Add To System Class Loader Search</synopsis>
<description>
This function can be used to cause instrumentation classes to be
defined by the system class loader. See
<vmspeclink id="ConstantPool.doc.html#79441"
name="Loading Using a User-defined Class Loader"
preposition="in"/>.
defined by the system class loader. See <vmspec chapter="5.3.2"/>.
After the class loader unsuccessfully searches for a class, the specified platform-dependent search
path <paramlink id="segment"/> will be searched as well. Only one segment may be specified in the
<paramlink id="segment"/>. This function may be called multiple times to add multiple segments, the
@ -10536,7 +10504,7 @@ myInit() {
which takes a single parameter of type <code>java.lang.String</code>. The method is not required
to have <code>public</code> access.
<p/>
The <vmspeclink/> specifies that a subsequent attempt to resolve a symbolic
<vmspec/> specifies that a subsequent attempt to resolve a symbolic
reference that the Java virtual machine has previously unsuccessfully attempted
to resolve always fails with the same error that was thrown as a result of the
initial resolution attempt. Consequently, if the JAR file contains an entry
@ -11438,7 +11406,7 @@ myInit() {
at the finest granularity allowed by the VM. A single step event is
generated whenever a thread reaches a new location.
Typically, single step events represent the completion of one VM
instruction as defined in the <vmspeclink/>. However, some implementations
instruction as defined in <vmspec/>. However, some implementations
may define locations differently. In any case the
<code>method</code> and <code>location</code>
parameters uniquely identify the current location and allow
@ -13841,7 +13809,7 @@ typedef void (JNICALL *jvmtiEventVMInit)
and can_get_source_debug_extension.
PopFrame cannot have a native calling method.
Removed incorrect statement in GetClassloaderClasses
(see http://java.sun.com/docs/books/vmspec/2nd-edition/html/ConstantPool.doc.html#79383).
(see <vmspec chapter="4.4"/>).
</change>
<change date="24 July 2003" version="v79">
XML and text fixes.
@ -1039,34 +1039,14 @@ typedef struct {
</a>
</xsl:template>

<xsl:template match="vmspeclink">
<xsl:if test="count(@id)=1">
<a>
<xsl:attribute name="href">
<xsl:text>http://java.sun.com/docs/books/vmspec/2nd-edition/html/</xsl:text>
<xsl:value-of select="@id"/>
</xsl:attribute>
<xsl:value-of select="@name"/>
</a>
<xsl:text> </xsl:text>
<xsl:choose>
<xsl:when test="count(@preposition)=1">
<xsl:value-of select="@preposition"/>
</xsl:when>
<xsl:otherwise>
<xsl:text>of</xsl:text>
</xsl:otherwise>
</xsl:choose>
<xsl:text> the </xsl:text>
</xsl:if>
<a>
<xsl:attribute name="href">
<xsl:text>http://java.sun.com/docs/books/vmspec/</xsl:text>
</xsl:attribute>
<i>
<xsl:text>Java Virtual Machine Specification</xsl:text>
</i>
</a>
<xsl:template match="vmspec">
<cite>
<xsl:text>The Java™ Virtual Machine Specification</xsl:text>
<xsl:if test="count(@chapter)=1">
<xsl:text>, Chapter </xsl:text>
<xsl:value-of select="@chapter"/>
</xsl:if>
</cite>
</xsl:template>

<xsl:template match="internallink">
@ -1804,6 +1804,8 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID metho
}

void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
assert(name != NULL && name[0] != '\0', "sanity check");

JavaThread* thread = JavaThread::current();
// In theory everyone coming thru here is in_vm but we need to be certain
// because a callee will do a vm->native transition
@ -38,6 +38,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
@ -939,10 +940,15 @@ JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
const char* name, const void* code_begin, const void* code_end) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
event._event_data.dynamic_code_generated.name = name;
// Need to make a copy of the name since we don't know how long
// the event poster will keep it around after we enqueue the
// deferred event and return. strdup() failure is handled in
// the post() routine below.
event._event_data.dynamic_code_generated.name = os::strdup(name);
event._event_data.dynamic_code_generated.code_begin = code_begin;
event._event_data.dynamic_code_generated.code_end = code_end;
return event;
@ -968,12 +974,19 @@ void JvmtiDeferredEvent::post() {
nmethodLocker::unlock_nmethod(nm);
break;
}
case TYPE_DYNAMIC_CODE_GENERATED:
case TYPE_DYNAMIC_CODE_GENERATED: {
JvmtiExport::post_dynamic_code_generated_internal(
_event_data.dynamic_code_generated.name,
// if strdup failed give the event a default name
(_event_data.dynamic_code_generated.name == NULL)
? "unknown_code" : _event_data.dynamic_code_generated.name,
_event_data.dynamic_code_generated.code_begin,
_event_data.dynamic_code_generated.code_end);
if (_event_data.dynamic_code_generated.name != NULL) {
// release our copy
os::free((void *)_event_data.dynamic_code_generated.name);
}
break;
}
default:
ShouldNotReachHere();
}
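The hunk above makes the deferred dynamic-code event own a heap copy of the name and release it after posting, falling back to a default label if the copy failed. A standalone sketch of the same ownership pattern, using plain strdup/free instead of the os:: wrappers (DeferredName and the helper functions are illustrative, not HotSpot types):

#include <cstdlib>
#include <cstring>

struct DeferredName {
  const char* name;  // heap copy owned by this struct; NULL if strdup failed
};

// Enqueue side: copy the caller's string, because the caller may free or
// reuse it as soon as this function returns.
DeferredName make_deferred(const char* caller_owned) {
  DeferredName d;
  d.name = strdup(caller_owned);  // NULL on allocation failure
  return d;
}

// Post side: substitute a default label if the copy failed, then release it.
void post_deferred(DeferredName& d) {
  const char* effective = (d.name == NULL) ? "unknown_code" : d.name;
  (void)effective;  // the real code hands this string to the event poster
  if (d.name != NULL) {
    free((void*)d.name);
    d.name = NULL;
  }
}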
@ -960,7 +960,7 @@ void Arguments::set_mode_flags(Mode mode) {
// Ensure Agent_OnLoad has the correct initial values.
// This may not be the final mode; mode may change later in onload phase.
PropertyList_unique_add(&_system_properties, "java.vm.info",
(char*)Abstract_VM_Version::vm_info_string(), false);
(char*)VM_Version::vm_info_string(), false);

UseInterpreter = true;
UseCompiler = true;
@ -1423,6 +1423,11 @@ void Arguments::set_parallel_gc_flags() {
}
}
}
if (UseNUMA) {
if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
}
}
}

void Arguments::set_g1_gc_flags() {
@ -2379,7 +2384,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
_gc_log_filename = strdup(tail);
FLAG_SET_CMDLINE(bool, PrintGC, true);
FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);

// JNI hooks
} else if (match_option(option, "-Xcheck", &tail)) {
@ -1827,7 +1827,7 @@ class CommandLineFlags {
develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive!) block offset array verification") \
\
product(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray" \
" (currently applicable only to CMS collector)") \
\
@ -2882,7 +2882,7 @@ class CommandLineFlags {
"Max. no. of lines in the stack trace for Java exceptions " \
"(0 means all)") \
\
NOT_EMBEDDED(develop(intx, GuaranteedSafepointInterval, 1000, \
NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \
"Guarantee a safepoint (at least) every so many milliseconds " \
"(0 means none)")) \
\
@ -274,7 +274,7 @@ class os: AllStatic {
static char* reserve_memory_special(size_t size, char* addr = NULL,
bool executable = false);
static bool release_memory_special(char* addr, size_t bytes);
static bool large_page_init();
static void large_page_init();
static size_t large_page_size();
static bool can_commit_large_page_memory();
static bool can_execute_large_page_memory();
@ -29,6 +29,7 @@
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <new>

#include "memory/allocation.inline.hpp"
#include "utilities/decoder.hpp"
@ -46,7 +47,7 @@ ElfFile::ElfFile(const char* filepath) {
m_status = Decoder::no_error;

int len = strlen(filepath) + 1;
m_filepath = NEW_C_HEAP_ARRAY(char, len);
m_filepath = (const char*)os::malloc(len * sizeof(char));
if (m_filepath != NULL) {
strcpy((char*)m_filepath, filepath);
m_file = fopen(filepath, "r");
@ -74,7 +75,7 @@ ElfFile::~ElfFile() {
}

if (m_filepath != NULL) {
FREE_C_HEAP_ARRAY(char, m_filepath);
os::free((void*)m_filepath);
}

if (m_next != NULL) {
@ -120,14 +121,14 @@ bool ElfFile::load_tables() {
}
// string table
if (shdr.sh_type == SHT_STRTAB) {
ElfStringTable* table = new ElfStringTable(m_file, shdr, index);
ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
if (table == NULL) {
m_status = Decoder::out_of_memory;
return false;
}
add_string_table(table);
} else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
ElfSymbolTable* table = new ElfSymbolTable(m_file, shdr);
ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
if (table == NULL) {
m_status = Decoder::out_of_memory;
return false;
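This file and the string-table change below both replace the C-heap macros with raw os::malloc/os::free plus explicit NULL checks, presumably so that an allocation failure inside the crash-reporting decoder degrades to missing symbols instead of terminating the VM. A minimal in-tree sketch of that pattern (buffer_for and release_buffer are hypothetical helpers, not part of the change):

#include "runtime/os.hpp"

// Allocate a scratch buffer the decoder can live without.  Returns NULL on
// failure; the caller must treat that as "feature unavailable", not an error.
static char* buffer_for(size_t byte_size) {
  return (char*)os::malloc(byte_size);
}

static void release_buffer(char* buf) {
  if (buf != NULL) {
    os::free((void*)buf);
  }
}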
@ -27,6 +27,7 @@
#ifndef _WINDOWS

#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/elfStringTable.hpp"

// We will try to load whole string table into memory if we can.
@ -41,14 +42,14 @@ ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) {

// try to load the string table
long cur_offset = ftell(file);
m_table = (char*)NEW_C_HEAP_ARRAY(char, shdr.sh_size);
m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size);
if (m_table != NULL) {
// if there is an error, mark the error
if (fseek(file, shdr.sh_offset, SEEK_SET) ||
fread((void*)m_table, shdr.sh_size, 1, file) != 1 ||
fseek(file, cur_offset, SEEK_SET)) {
m_status = Decoder::file_invalid;
FREE_C_HEAP_ARRAY(char, m_table);
os::free((void*)m_table);
m_table = NULL;
}
} else {
@ -58,7 +59,7 @@ ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) {

ElfStringTable::~ElfStringTable() {
if (m_table != NULL) {
FREE_C_HEAP_ARRAY(char, m_table);
os::free((void*)m_table);
}

if (m_next != NULL) {