J. Duke 2017-07-05 20:11:04 +02:00
commit 7bcd68204b
402 changed files with 10327 additions and 4763 deletions


@ -284,3 +284,4 @@ d42c0a90afc3c66ca87543076ec9aafd4b4680de jdk9-b38
512dbbeb1730edcebfec873fc3f1455660b32000 jdk9-b39
cf136458ee747e151a27aa9ea0c1492ea55ef3e7 jdk9-b40
67395f7ca2db3b52e3a62a84888487de5cb9210a jdk9-b41
f7c11da0b0481d49cc7a65a453336c108191e821 jdk9-b42


@ -73,31 +73,25 @@ AC_DEFUN([BOOTJDK_DO_CHECK],
AC_MSG_NOTICE([(This might be a JRE instead of a JDK)])
BOOT_JDK_FOUND=no
else
# Do we have an rt.jar? (On MacOSX it is called classes.jar)
if test ! -f "$BOOT_JDK/jre/lib/rt.jar" && test ! -f "$BOOT_JDK/../Classes/classes.jar"; then
AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK did not contain an rt.jar; ignoring])
# Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`
# Extra M4 quote needed to protect [] in grep expression.
[FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[89]\.'`]
if test "x$FOUND_CORRECT_VERSION" = x; then
AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring])
AC_MSG_NOTICE([(Your Boot JDK must be version 8 or 9)])
BOOT_JDK_FOUND=no
else
# Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | head -n 1`
# Extra M4 quote needed to protect [] in grep expression.
[FOUND_CORRECT_VERSION=`echo $BOOT_JDK_VERSION | grep '\"1\.[89]\.'`]
if test "x$FOUND_CORRECT_VERSION" = x; then
AC_MSG_NOTICE([Potential Boot JDK found at $BOOT_JDK is incorrect JDK version ($BOOT_JDK_VERSION); ignoring])
AC_MSG_NOTICE([(Your Boot JDK must be version 8 or 9)])
BOOT_JDK_FOUND=no
else
# We're done! :-)
BOOT_JDK_FOUND=yes
BASIC_FIXUP_PATH(BOOT_JDK)
AC_MSG_CHECKING([for Boot JDK])
AC_MSG_RESULT([$BOOT_JDK])
AC_MSG_CHECKING([Boot JDK version])
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | $TR '\n\r' ' '`
AC_MSG_RESULT([$BOOT_JDK_VERSION])
fi # end check jdk version
fi # end check rt.jar
# We're done! :-)
BOOT_JDK_FOUND=yes
BASIC_FIXUP_PATH(BOOT_JDK)
AC_MSG_CHECKING([for Boot JDK])
AC_MSG_RESULT([$BOOT_JDK])
AC_MSG_CHECKING([Boot JDK version])
BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java" -version 2>&1 | $TR '\n\r' ' '`
AC_MSG_RESULT([$BOOT_JDK_VERSION])
fi # end check jdk version
fi # end check javac
fi # end check java
fi # end check boot jdk found
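
A note on the check above: the version gate is just a substring match on the first line of `java -version` output, where the grep pattern '"1\.[89]\.' accepts version banners beginning with "1.8." or "1.9.". A standalone C++ sketch of the same acceptance test (hypothetical, for illustration only; the real check is the shell pipeline above):

#include <iostream>
#include <regex>
#include <string>

// Mirrors the configure check's grep pattern '"1\.[89]\.':
// accept a quoted version string starting with 1.8. or 1.9.
static bool is_acceptable_boot_jdk(const std::string& banner) {
  static const std::regex pat("\"1\\.[89]\\.");
  return std::regex_search(banner, pat);
}

int main() {
  std::cout << is_acceptable_boot_jdk("java version \"1.8.0_25\"") << '\n';  // 1
  std::cout << is_acceptable_boot_jdk("java version \"1.7.0_71\"") << '\n';  // 0
}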

File diff suppressed because it is too large.


@ -284,3 +284,4 @@ ffd90c81d4ef9d94d880fc852e2fc482ecd9b374 jdk9-b36
53bf36cb722db50815712258a77cb6bbe25a2f5f jdk9-b39
e27c725d6c9d155667b35255f442d4ceb8c3c084 jdk9-b40
1908b886ba1eda46fa725cf1160fe5d30fd1a7e5 jdk9-b41
078bb11af876fe528d4b516f33ad4dd9bb60549e jdk9-b42


@ -444,3 +444,4 @@ c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
9cb75e5e394827ccbaf2e15524108a412dc4ddc5 jdk9-b39
6b09b3193d731e3288e2a240c504a20d0a06c766 jdk9-b40
1d29b13e8a515a7ea3b882f140576d5d675bc11f jdk9-b41
38cb4fbd47e3472bd1b5ebac83bda96fe4869c4f jdk9-b42


@ -37,10 +37,7 @@ import sun.jvm.hotspot.runtime.*;
<ul>
<li> CardGeneration
<ul>
<li> OneContigSpaceCardGeneration
<ul>
<li> TenuredGeneration
</ul>
<li> TenuredGeneration
</ul>
<li> DefNewGeneration
</ul>
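
The list above reflects the heart of this changeset on the serviceability-agent side: OneContigSpaceCardGeneration disappears from the hierarchy and TenuredGeneration now sits directly under CardGeneration. Schematically, in C++ terms (a sketch of the shape only, not the real declarations):

// Before: Generation <- CardGeneration <- OneContigSpaceCardGeneration <- TenuredGeneration
// After:  Generation <- CardGeneration <- TenuredGeneration
class Generation        { /* ... */ };
class CardGeneration    : public Generation     { /* ... */ };
class TenuredGeneration : public CardGeneration { /* ... */ };
class DefNewGeneration  : public Generation     { /* ... */ };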


@ -1,82 +0,0 @@
/*
* Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
/** <P> OneSpaceOldGeneration models a heap of old objects contained
in a single contiguous space. </P>
<P> Garbage collection is performed using mark-compact. </P> */
public abstract class OneContigSpaceCardGeneration extends CardGeneration {
private static AddressField theSpaceField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("OneContigSpaceCardGeneration");
theSpaceField = type.getAddressField("_the_space");
}
public OneContigSpaceCardGeneration(Address addr) {
super(addr);
}
public ContiguousSpace theSpace() {
return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
}
public boolean isIn(Address p) {
return theSpace().contains(p);
}
/** Space queries */
public long capacity() { return theSpace().capacity(); }
public long used() { return theSpace().used(); }
public long free() { return theSpace().free(); }
public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
blk.doSpace(theSpace());
}
public void printOn(PrintStream tty) {
tty.print(" old ");
theSpace().printOn(tty);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,13 +24,62 @@
package sun.jvm.hotspot.memory;
import sun.jvm.hotspot.debugger.*;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
/** <P> TenuredGeneration models a heap of old objects contained
in a single contiguous space. </P>
<P> Garbage collection is performed using mark-compact. </P> */
public class TenuredGeneration extends CardGeneration {
private static AddressField theSpaceField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("TenuredGeneration");
theSpaceField = type.getAddressField("_the_space");
}
public class TenuredGeneration extends OneContigSpaceCardGeneration {
public TenuredGeneration(Address addr) {
super(addr);
}
public ContiguousSpace theSpace() {
return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, theSpaceField.getValue(addr));
}
public boolean isIn(Address p) {
return theSpace().contains(p);
}
/** Space queries */
public long capacity() { return theSpace().capacity(); }
public long used() { return theSpace().used(); }
public long free() { return theSpace().free(); }
public long contiguousAvailable() { return theSpace().free() + virtualSpace().uncommittedSize(); }
public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
blk.doSpace(theSpace());
}
public void printOn(PrintStream tty) {
tty.print(" old ");
theSpace().printOn(tty);
}
public Generation.Name kind() {
return Generation.Name.MARK_SWEEP_COMPACT;
}


@ -226,18 +226,8 @@ SHARED_FLAG = -shared
# Allow no optimizations.
DEBUG_CFLAGS=-O0
# Use the stabs format for debugging information (this is the default
# on gcc-2.91). It's good enough, has all the information about line
# numbers and local variables, and libjvm.so is only about 16M.
# Change this back to "-g" if you want the most expressive format.
# (warning: that could easily inflate libjvm.so to 150M!)
# Note: The Itanium gcc compiler crashes when using -gstabs.
DEBUG_CFLAGS/ia64 = -g
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
DEBUG_CFLAGS += -gstabs
endif
# Enable debug symbols
DEBUG_CFLAGS += -g
# Enable bounds checking.
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"


@ -496,15 +496,6 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
FASTDEBUG_CFLAGS += -xs
endif
# Special global options for SS12
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
# There appears to be multiple issues with the new Dwarf2 debug format, so
# we tell the compiler to use the older 'stabs' debug format all the time.
# Note that this needs to be used in optimized compiles too to be 100%.
# This is a workaround for SS12 (5.9) bug 6694600
CFLAGS += -xdebugformat=stabs
endif
# Enable the following CFLAGS additions if you need to compare the
# built ELF objects.
#


@ -570,10 +570,12 @@ public:
static uint cores_per_cpu() {
uint result = 1;
if (is_intel()) {
if (supports_processor_topology()) {
bool supports_topology = supports_processor_topology();
if (supports_topology) {
result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
_cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else {
}
if (!supports_topology || result == 0) {
result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
}
} else if (is_amd()) {
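
The rewrite above keeps the CPUID leaf-0xB topology computation but adds a second trigger for the fallback: the leaf-4 core count is now used not only when topology enumeration is unsupported, but also when the topology-derived quotient comes out as zero, which can apparently happen when a hypervisor presents zeroed topology leaves. A freestanding sketch of the guarded fallback, with hypothetical stand-ins for the cpuid fields:

#include <cstdio>

struct CpuidInfo {
  unsigned topo_l1_logical;      // stands in for tpl_cpuidB1_ebx.bits.logical_cpus
  unsigned topo_l0_logical;      // stands in for tpl_cpuidB0_ebx.bits.logical_cpus
  unsigned dcp_cores_minus_one;  // stands in for dcp_cpuid4_eax.bits.cores_per_cpu
};

static unsigned cores_per_cpu(const CpuidInfo& ci, bool supports_topology) {
  unsigned result = 1;
  if (supports_topology) {
    result = ci.topo_l1_logical / ci.topo_l0_logical;
  }
  // Fall back to the deterministic cache parameters leaf when topology
  // is unsupported or reports zeros.
  if (!supports_topology || result == 0) {
    result = ci.dcp_cores_minus_one + 1;
  }
  return result;
}

int main() {
  CpuidInfo vm_guest = {0, 1, 3};  // topology leaf reports zero logical CPUs
  std::printf("%u\n", cores_per_cpu(vm_guest, true));  // prints 4, not 0
}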


@ -569,13 +569,13 @@ void os::init_system_properties_values() {
char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, ld_library_path);
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
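
The two frees above show a pattern that repeats through the rest of this commit: FREE_C_HEAP_ARRAY (and, later, os::free) drop their trailing memory-type argument (mtInternal and friends). The type tag is still supplied at allocation time via NEW_C_HEAP_ARRAY; the free path presumably recovers whatever bookkeeping it needs from the allocation itself, so callers no longer have to repeat the tag. A schematic sketch of that idea (hypothetical, not HotSpot's actual allocator; a real header would also have to respect maximum alignment):

#include <cstdlib>
#include <new>

enum MemTag { mtInternal, mtClass, mtCode };

struct AllocHeader { MemTag tag; };  // prepended to every allocation

static void* tagged_alloc(std::size_t bytes, MemTag tag) {
  void* raw = std::malloc(sizeof(AllocHeader) + bytes);
  if (raw == nullptr) throw std::bad_alloc();
  static_cast<AllocHeader*>(raw)->tag = tag;
  return static_cast<AllocHeader*>(raw) + 1;  // user data follows the header
}

static void tagged_free(void* p) {  // no tag parameter needed
  if (p == nullptr) return;
  AllocHeader* h = static_cast<AllocHeader*>(p) - 1;
  // h->tag is available here for accounting before the release.
  std::free(h);
}

With the tag recoverable on the free path, call sites shrink from FREE_C_HEAP_ARRAY(char, p, mtInternal) to FREE_C_HEAP_ARRAY(char, p), which is exactly the shape of the edits throughout the files below.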
@ -1300,11 +1300,11 @@ bool os::dll_build_name(char* buffer, size_t buflen,
// release the storage
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@ -4144,8 +4144,29 @@ int os::available(int fd, jlong *bytes) {
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
Unimplemented();
return NULL;
int prot;
int flags = MAP_PRIVATE;
if (read_only) {
prot = PROT_READ;
} else {
prot = PROT_READ | PROT_WRITE;
}
if (allow_exec) {
prot |= PROT_EXEC;
}
if (addr != NULL) {
flags |= MAP_FIXED;
}
char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
fd, file_offset);
if (mapped_address == MAP_FAILED) {
return NULL;
}
return mapped_address;
}
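
os::pd_map_memory above goes from Unimplemented() to a direct mmap wrapper. For reference, a self-contained sketch with the same protection-flag policy, mapping a file read-only (POSIX; hypothetical helper, not part of the patch):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// Map 'len' bytes of 'path' using the prot/flags selection used above.
static char* map_file(const char* path, size_t len, int read_only,
                      int allow_exec) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) return NULL;
  int prot = read_only ? PROT_READ : (PROT_READ | PROT_WRITE);
  if (allow_exec) prot |= PROT_EXEC;
  char* p = (char*)mmap(NULL, len, prot, MAP_PRIVATE, fd, 0);
  close(fd);  // a MAP_PRIVATE mapping stays valid after the fd is closed
  return (p == MAP_FAILED) ? NULL : p;
}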


@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -127,7 +127,7 @@ static void save_memory_to_file(char* addr, size_t size) {
}
}
}
FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
FREE_C_HEAP_ARRAY(char, destfile);
}
@ -279,14 +279,14 @@ static char* get_user_name(uid_t uid) {
"pw_name zero length");
}
}
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return NULL;
}
char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
strcpy(user_name, p->pw_name);
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return user_name;
}
@ -347,7 +347,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
DIR* subdirp = os::opendir(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
continue;
}
@ -358,7 +358,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// symlink can be exploited.
//
if (!is_directory_secure(usrdir_name)) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
os::closedir(subdirp);
continue;
}
@ -382,13 +382,13 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// don't follow symbolic links for the file
RESTARTABLE(::lstat(filename, &statbuf), result);
if (result == OS_ERR) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
// skip over files that are not regular files.
if (!S_ISREG(statbuf.st_mode)) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
@ -398,7 +398,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
if (statbuf.st_ctime > oldest_ctime) {
char* user = strchr(dentry->d_name, '_') + 1;
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
strcpy(oldest_user, user);
@ -406,15 +406,15 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}
}
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
}
}
os::closedir(subdirp);
FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, udbuf);
FREE_C_HEAP_ARRAY(char, usrdir_name);
}
os::closedir(tmpdirp);
FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, tdbuf);
return(oldest_user);
}
@ -481,7 +481,7 @@ static void remove_file(const char* dirname, const char* filename) {
remove_file(path);
FREE_C_HEAP_ARRAY(char, path, mtInternal);
FREE_C_HEAP_ARRAY(char, path);
}
@ -558,7 +558,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
errno = 0;
}
os::closedir(dirp);
FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, dbuf);
}
// make the user specific temporary directory. Returns true if
@ -703,11 +703,11 @@ static char* mmap_create_shared(size_t size) {
fd = create_sharedmem_resources(dirname, filename, size);
FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, user_name);
FREE_C_HEAP_ARRAY(char, dirname);
if (fd == -1) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -723,7 +723,7 @@ static char* mmap_create_shared(size_t size) {
warning("mmap failed - %s\n", strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -769,7 +769,7 @@ static void delete_shared_memory(char* addr, size_t size) {
remove_file(backing_store_file_name);
// Don't.. Free heap memory could deadlock os::abort() if it is called
// from signal handler. OS will reclaim the heap memory.
// FREE_C_HEAP_ARRAY(char, backing_store_file_name, mtInternal);
// FREE_C_HEAP_ARRAY(char, backing_store_file_name);
backing_store_file_name = NULL;
}
}
@ -853,9 +853,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, luser);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
@ -871,9 +871,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
strcpy(rfilename, filename);
// free the c heap resources that are no longer needed
if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
FREE_C_HEAP_ARRAY(char, dirname);
FREE_C_HEAP_ARRAY(char, filename);
// open the shared memory file for the given vmid
fd = open_sharedmem_file(rfilename, file_flags, CHECK);


@ -416,14 +416,14 @@ void os::init_system_properties_values() {
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, ld_library_path);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
#else // __APPLE__
@ -506,7 +506,7 @@ void os::init_system_properties_values() {
sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.",
v, v_colon, l, l_colon, user_home_dir);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, ld_library_path);
}
// Extensions directories.
@ -518,7 +518,7 @@ void os::init_system_properties_values() {
user_home_dir, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
#undef SYS_EXTENSIONS_DIR
#undef SYS_EXTENSIONS_DIRS
@ -1303,11 +1303,11 @@ bool os::dll_build_name(char* buffer, size_t buflen,
// release the storage
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname);


@ -127,7 +127,7 @@ static void save_memory_to_file(char* addr, size_t size) {
}
}
}
FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
FREE_C_HEAP_ARRAY(char, destfile);
}
@ -279,14 +279,14 @@ static char* get_user_name(uid_t uid) {
"pw_name zero length");
}
}
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return NULL;
}
char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
strcpy(user_name, p->pw_name);
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return user_name;
}
@ -347,7 +347,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
DIR* subdirp = os::opendir(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
continue;
}
@ -358,7 +358,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// symlink can be exploited.
//
if (!is_directory_secure(usrdir_name)) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
os::closedir(subdirp);
continue;
}
@ -382,13 +382,13 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// don't follow symbolic links for the file
RESTARTABLE(::lstat(filename, &statbuf), result);
if (result == OS_ERR) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
// skip over files that are not regular files.
if (!S_ISREG(statbuf.st_mode)) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
@ -398,7 +398,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
if (statbuf.st_ctime > oldest_ctime) {
char* user = strchr(dentry->d_name, '_') + 1;
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
strcpy(oldest_user, user);
@ -406,15 +406,15 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}
}
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
}
}
os::closedir(subdirp);
FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, udbuf);
FREE_C_HEAP_ARRAY(char, usrdir_name);
}
os::closedir(tmpdirp);
FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, tdbuf);
return(oldest_user);
}
@ -481,7 +481,7 @@ static void remove_file(const char* dirname, const char* filename) {
remove_file(path);
FREE_C_HEAP_ARRAY(char, path, mtInternal);
FREE_C_HEAP_ARRAY(char, path);
}
@ -558,7 +558,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
errno = 0;
}
os::closedir(dirp);
FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, dbuf);
}
// make the user specific temporary directory. Returns true if
@ -725,11 +725,11 @@ static char* mmap_create_shared(size_t size) {
fd = create_sharedmem_resources(dirname, filename, size);
FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, user_name);
FREE_C_HEAP_ARRAY(char, dirname);
if (fd == -1) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -743,7 +743,7 @@ static char* mmap_create_shared(size_t size) {
warning("mmap failed - %s\n", strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -872,9 +872,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, luser);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
@ -890,9 +890,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
strcpy(rfilename, filename);
// free the c heap resources that are no longer needed
if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
FREE_C_HEAP_ARRAY(char, dirname);
FREE_C_HEAP_ARRAY(char, filename);
// open the shared memory file for the given vmid
fd = open_sharedmem_file(rfilename, file_flags, CHECK);


@ -402,14 +402,14 @@ void os::init_system_properties_values() {
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, ld_library_path);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
@ -1614,11 +1614,11 @@ bool os::dll_build_name(char* buffer, size_t buflen,
// release the storage
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@ -2929,7 +2929,7 @@ void os::Linux::rebuild_cpu_to_node_map() {
}
}
}
FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
}
int os::Linux::get_node_by_cpu(int cpu_id) {


@ -127,7 +127,7 @@ static void save_memory_to_file(char* addr, size_t size) {
}
}
}
FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
FREE_C_HEAP_ARRAY(char, destfile);
}
@ -279,14 +279,14 @@ static char* get_user_name(uid_t uid) {
"pw_name zero length");
}
}
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return NULL;
}
char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
strcpy(user_name, p->pw_name);
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return user_name;
}
@ -347,7 +347,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
DIR* subdirp = os::opendir(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
continue;
}
@ -358,7 +358,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// symlink can be exploited.
//
if (!is_directory_secure(usrdir_name)) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
os::closedir(subdirp);
continue;
}
@ -382,13 +382,13 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// don't follow symbolic links for the file
RESTARTABLE(::lstat(filename, &statbuf), result);
if (result == OS_ERR) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
// skip over files that are not regular files.
if (!S_ISREG(statbuf.st_mode)) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
@ -398,7 +398,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
if (statbuf.st_ctime > oldest_ctime) {
char* user = strchr(dentry->d_name, '_') + 1;
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
strcpy(oldest_user, user);
@ -406,15 +406,15 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}
}
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
}
}
os::closedir(subdirp);
FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, udbuf);
FREE_C_HEAP_ARRAY(char, usrdir_name);
}
os::closedir(tmpdirp);
FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, tdbuf);
return(oldest_user);
}
@ -481,7 +481,7 @@ static void remove_file(const char* dirname, const char* filename) {
remove_file(path);
FREE_C_HEAP_ARRAY(char, path, mtInternal);
FREE_C_HEAP_ARRAY(char, path);
}
@ -558,7 +558,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
errno = 0;
}
os::closedir(dirp);
FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, dbuf);
}
// make the user specific temporary directory. Returns true if
@ -725,11 +725,11 @@ static char* mmap_create_shared(size_t size) {
fd = create_sharedmem_resources(dirname, filename, size);
FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, user_name);
FREE_C_HEAP_ARRAY(char, dirname);
if (fd == -1) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -743,7 +743,7 @@ static char* mmap_create_shared(size_t size) {
warning("mmap failed - %s\n", strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -872,9 +872,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, luser);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
@ -890,9 +890,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
strcpy(rfilename, filename);
// free the c heap resources that are no longer needed
if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
FREE_C_HEAP_ARRAY(char, dirname);
FREE_C_HEAP_ARRAY(char, filename);
// open the shared memory file for the given vmid
fd = open_sharedmem_file(rfilename, file_flags, THREAD);


@ -506,7 +506,7 @@ static bool assign_distribution(processorid_t* id_array,
}
}
if (available_id != NULL) {
FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
FREE_C_HEAP_ARRAY(bool, available_id);
}
return true;
}
@ -538,7 +538,7 @@ bool os::distribute_processes(uint length, uint* distribution) {
}
}
if (id_array != NULL) {
FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
FREE_C_HEAP_ARRAY(processorid_t, id_array);
}
return result;
}
@ -673,7 +673,7 @@ void os::init_system_properties_values() {
// Determine search path count and required buffer size.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
}
@ -684,8 +684,8 @@ void os::init_system_properties_values() {
// Obtain search path information.
if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, info, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
FREE_C_HEAP_ARRAY(char, info);
vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
}
@ -755,15 +755,15 @@ void os::init_system_properties_values() {
// Callee copies into its own buffer.
Arguments::set_library_path(library_path);
FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, info, mtInternal);
FREE_C_HEAP_ARRAY(char, library_path);
FREE_C_HEAP_ARRAY(char, info);
}
// Extensions directories.
sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
Arguments::set_ext_dirs(buf);
FREE_C_HEAP_ARRAY(char, buf, mtInternal);
FREE_C_HEAP_ARRAY(char, buf);
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
@ -1592,11 +1592,11 @@ bool os::dll_build_name(char* buffer, size_t buflen,
// release the storage
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@ -4683,7 +4683,7 @@ jint os::init_2(void) {
size_t lgrp_limit = os::numa_get_groups_num();
int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
FREE_C_HEAP_ARRAY(int, lgrp_ids);
if (lgrp_num < 2) {
// There's only one locality group, disable NUMA.
UseNUMA = false;


@ -129,7 +129,7 @@ static void save_memory_to_file(char* addr, size_t size) {
}
}
}
FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
FREE_C_HEAP_ARRAY(char, destfile);
}
@ -270,14 +270,14 @@ static char* get_user_name(uid_t uid) {
"pw_name zero length");
}
}
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return NULL;
}
char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
strcpy(user_name, p->pw_name);
FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, pwbuf);
return user_name;
}
@ -338,7 +338,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
DIR* subdirp = os::opendir(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
continue;
}
@ -349,7 +349,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// symlink can be exploited.
//
if (!is_directory_secure(usrdir_name)) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
os::closedir(subdirp);
continue;
}
@ -373,13 +373,13 @@ static char* get_user_name_slow(int vmid, TRAPS) {
// don't follow symbolic links for the file
RESTARTABLE(::lstat(filename, &statbuf), result);
if (result == OS_ERR) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
// skip over files that are not regular files.
if (!S_ISREG(statbuf.st_mode)) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
@ -389,7 +389,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
if (statbuf.st_ctime > oldest_ctime) {
char* user = strchr(dentry->d_name, '_') + 1;
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
strcpy(oldest_user, user);
@ -397,15 +397,15 @@ static char* get_user_name_slow(int vmid, TRAPS) {
}
}
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
}
}
os::closedir(subdirp);
FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, udbuf);
FREE_C_HEAP_ARRAY(char, usrdir_name);
}
os::closedir(tmpdirp);
FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, tdbuf);
return(oldest_user);
}
@ -520,7 +520,7 @@ static void remove_file(const char* dirname, const char* filename) {
remove_file(path);
FREE_C_HEAP_ARRAY(char, path, mtInternal);
FREE_C_HEAP_ARRAY(char, path);
}
@ -597,7 +597,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
errno = 0;
}
os::closedir(dirp);
FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, dbuf);
}
// make the user specific temporary directory. Returns true if
@ -742,11 +742,11 @@ static char* mmap_create_shared(size_t size) {
fd = create_sharedmem_resources(dirname, filename, size);
FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, user_name);
FREE_C_HEAP_ARRAY(char, dirname);
if (fd == -1) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -760,7 +760,7 @@ static char* mmap_create_shared(size_t size) {
warning("mmap failed - %s\n", strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
return NULL;
}
@ -890,9 +890,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
// store file, we don't follow them when attaching either.
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname);
if (luser != user) {
FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, luser);
}
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
@ -908,9 +908,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
strcpy(rfilename, filename);
// free the c heap resources that are no longer needed
if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
FREE_C_HEAP_ARRAY(char, dirname);
FREE_C_HEAP_ARRAY(char, filename);
// open the shared memory file for the given vmid
fd = open_sharedmem_file(rfilename, file_flags, THREAD);


@ -211,7 +211,7 @@ void os::init_system_properties_values() {
}
strcpy(home_path, home_dir);
Arguments::set_java_home(home_path);
FREE_C_HEAP_ARRAY(char, home_path, mtInternal);
FREE_C_HEAP_ARRAY(char, home_path);
dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
mtInternal);
@ -221,7 +221,7 @@ void os::init_system_properties_values() {
strcpy(dll_path, home_dir);
strcat(dll_path, bin);
Arguments::set_dll_dir(dll_path);
FREE_C_HEAP_ARRAY(char, dll_path, mtInternal);
FREE_C_HEAP_ARRAY(char, dll_path);
if (!set_boot_path('\\', ';')) {
return;
@ -276,7 +276,7 @@ void os::init_system_properties_values() {
strcat(library_path, ";.");
Arguments::set_library_path(library_path);
FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
FREE_C_HEAP_ARRAY(char, library_path);
}
// Default extensions directory
@ -1123,7 +1123,7 @@ DIR * os::opendir(const char *dirname) {
dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
if (dirp->path == 0) {
free(dirp, mtInternal);
free(dirp);
errno = ENOMEM;
return 0;
}
@ -1131,13 +1131,13 @@ DIR * os::opendir(const char *dirname) {
fattr = GetFileAttributes(dirp->path);
if (fattr == 0xffffffff) {
free(dirp->path, mtInternal);
free(dirp, mtInternal);
free(dirp->path);
free(dirp);
errno = ENOENT;
return 0;
} else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
free(dirp->path, mtInternal);
free(dirp, mtInternal);
free(dirp->path);
free(dirp);
errno = ENOTDIR;
return 0;
}
@ -1155,8 +1155,8 @@ DIR * os::opendir(const char *dirname) {
dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
if (dirp->handle == INVALID_HANDLE_VALUE) {
if (GetLastError() != ERROR_FILE_NOT_FOUND) {
free(dirp->path, mtInternal);
free(dirp, mtInternal);
free(dirp->path);
free(dirp);
errno = EACCES;
return 0;
}
@ -1194,8 +1194,8 @@ int os::closedir(DIR *dirp) {
}
dirp->handle = INVALID_HANDLE_VALUE;
}
free(dirp->path, mtInternal);
free(dirp, mtInternal);
free(dirp->path);
free(dirp);
return 0;
}
@ -1262,11 +1262,11 @@ bool os::dll_build_name(char *buffer, size_t buflen,
// release the storage
for (int i = 0; i < n; i++) {
if (pelements[i] != NULL) {
FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
FREE_C_HEAP_ARRAY(char, pelements[i]);
}
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
@ -2732,7 +2732,7 @@ class NUMANodeListHolder {
void free_node_list() {
if (_numa_used_node_list != NULL) {
FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
}
}
@ -3768,8 +3768,8 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}
#define MAX_EXIT_HANDLES 16
#define EXIT_TIMEOUT 1000 /* 1 sec */
#define MAX_EXIT_HANDLES PRODUCT_ONLY(32) NOT_PRODUCT(128)
#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
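
MAX_EXIT_HANDLES and EXIT_TIMEOUT above become build-variant dependent: debug builds keep more exiting-thread handles and wait longer. PRODUCT_ONLY and NOT_PRODUCT are HotSpot's usual conditional-compilation macros; their typical shape (a sketch from memory, not taken from this patch) is:

#ifdef PRODUCT
  #define PRODUCT_ONLY(code) code
  #define NOT_PRODUCT(code)
#else
  #define PRODUCT_ONLY(code)
  #define NOT_PRODUCT(code) code
#endif

// So the pair expands to 32 handles / 1000 ms in a product build
// and 128 handles / 4000 ms in a debug build:
#define MAX_EXIT_HANDLES PRODUCT_ONLY(32) NOT_PRODUCT(128)
#define EXIT_TIMEOUT     PRODUCT_ONLY(1000) NOT_PRODUCT(4000)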
@ -3820,6 +3820,9 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// If there's no free slot in the array of the kept handles, we'll have to
// wait until at least one thread completes exiting.
if ((handle_count = j) == MAX_EXIT_HANDLES) {
// Raise the priority of the oldest exiting thread to increase its chances
// to complete sooner.
SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
i = (res - WAIT_OBJECT_0);
@ -3828,7 +3831,8 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
handles[i] = handles[i + 1];
}
} else {
warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
// Don't keep handles, if we failed waiting for them.
for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
CloseHandle(handles[i]);
@ -3854,9 +3858,20 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (handle_count > 0) {
// Before ending the process, make sure all the threads that had called
// _endthreadex() completed.
// Set the priority level of the current thread to the same value as
// the priority level of exiting threads.
// This is to ensure it will be given a fair chance to execute if
// the timeout expires.
hthr = GetCurrentThread();
SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
for (i = 0; i < handle_count; ++i) {
SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
}
res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
if (res == WAIT_FAILED) {
warning("WaitForMultipleObjects failed in %s: %d\n", __FILE__, __LINE__);
if (res < WAIT_OBJECT_0 || res >= (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
}
for (i = 0; i < handle_count; ++i) {
CloseHandle(handles[i]);
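
The comments in the hunks above spell out the strategy: when the table of exiting-thread handles is full, or when the process is about to end, boost the priority of the threads being waited on (and of the current thread) so they actually get scheduled before EXIT_TIMEOUT expires. A reduced Win32 sketch of that wait-with-boost pattern (hypothetical standalone helper):

#include <windows.h>

// Wait for a pool of exiting-thread handles, raising their priority
// first so they can finish before the timeout (sketch).
static void wait_for_exiting_threads(HANDLE* handles, DWORD count,
                                     DWORD timeout_ms) {
  for (DWORD i = 0; i < count; ++i) {
    SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
  }
  DWORD res = WaitForMultipleObjects(count, handles, TRUE /* wait for all */,
                                     timeout_ms);
  if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
    // Mirrors the warning path above: report, then fall through to cleanup.
  }
  for (DWORD i = 0; i < count; ++i) {
    CloseHandle(handles[i]);
  }
}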
@ -4631,7 +4646,7 @@ static int stdinAvailable(int fd, long *pbytes) {
error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
if (error == 0) {
os::free(lpBuffer, mtInternal);
os::free(lpBuffer);
return FALSE;
}
@ -4652,7 +4667,7 @@ static int stdinAvailable(int fd, long *pbytes) {
}
if (lpBuffer != NULL) {
os::free(lpBuffer, mtInternal);
os::free(lpBuffer);
}
*pbytes = (long) actualLength;


@ -122,7 +122,7 @@ static void save_memory_to_file(char* addr, size_t size) {
}
}
FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
FREE_C_HEAP_ARRAY(char, destfile);
}
// Shared Memory Implementation Details
@ -335,7 +335,7 @@ static char* get_user_name_slow(int vmid) {
DIR* subdirp = os::opendir(usrdir_name);
if (subdirp == NULL) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
continue;
}
@ -346,7 +346,7 @@ static char* get_user_name_slow(int vmid) {
// symlink can be exploited.
//
if (!is_directory_secure(usrdir_name)) {
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name);
os::closedir(subdirp);
continue;
}
@ -367,13 +367,13 @@ static char* get_user_name_slow(int vmid) {
strcat(filename, udentry->d_name);
if (::stat(filename, &statbuf) == OS_ERR) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
// skip over files that are not regular files.
if ((statbuf.st_mode & S_IFMT) != S_IFREG) {
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
continue;
}
@ -395,22 +395,22 @@ static char* get_user_name_slow(int vmid) {
if (statbuf.st_ctime > latest_ctime) {
char* user = strchr(dentry->d_name, '_') + 1;
if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user, mtInternal);
if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user);
latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
strcpy(latest_user, user);
latest_ctime = statbuf.st_ctime;
}
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
}
}
os::closedir(subdirp);
FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
FREE_C_HEAP_ARRAY(char, udbuf);
FREE_C_HEAP_ARRAY(char, usrdir_name);
}
os::closedir(tmpdirp);
FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, tdbuf);
return(latest_user);
}
@ -502,7 +502,7 @@ static void remove_file(const char* dirname, const char* filename) {
}
}
FREE_C_HEAP_ARRAY(char, path, mtInternal);
FREE_C_HEAP_ARRAY(char, path);
}
// returns true if the process represented by pid is alive, otherwise
@ -683,7 +683,7 @@ static void cleanup_sharedmem_resources(const char* dirname) {
errno = 0;
}
os::closedir(dirp);
FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
FREE_C_HEAP_ARRAY(char, dbuf);
}
// create a file mapping object with the requested name, and size
@ -749,11 +749,11 @@ static void free_security_desc(PSECURITY_DESCRIPTOR pSD) {
// be an ACL we enlisted. free the resources.
//
if (success && exists && pACL != NULL && !isdefault) {
FREE_C_HEAP_ARRAY(char, pACL, mtInternal);
FREE_C_HEAP_ARRAY(char, pACL);
}
// free the security descriptor
FREE_C_HEAP_ARRAY(char, pSD, mtInternal);
FREE_C_HEAP_ARRAY(char, pSD);
}
}
@ -768,7 +768,7 @@ static void free_security_attr(LPSECURITY_ATTRIBUTES lpSA) {
lpSA->lpSecurityDescriptor = NULL;
// free the security attributes structure
FREE_C_HEAP_ARRAY(char, lpSA, mtInternal);
FREE_C_HEAP_ARRAY(char, lpSA);
}
}
@ -815,7 +815,7 @@ static PSID get_user_sid(HANDLE hProcess) {
warning("GetTokenInformation failure: lasterror = %d,"
" rsize = %d\n", GetLastError(), rsize);
}
FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
FREE_C_HEAP_ARRAY(char, token_buf);
CloseHandle(hAccessToken);
return NULL;
}
@ -828,15 +828,15 @@ static PSID get_user_sid(HANDLE hProcess) {
warning("GetTokenInformation failure: lasterror = %d,"
" rsize = %d\n", GetLastError(), rsize);
}
FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
FREE_C_HEAP_ARRAY(char, pSID, mtInternal);
FREE_C_HEAP_ARRAY(char, token_buf);
FREE_C_HEAP_ARRAY(char, pSID);
CloseHandle(hAccessToken);
return NULL;
}
// close the access token.
CloseHandle(hAccessToken);
FREE_C_HEAP_ARRAY(char, token_buf, mtInternal);
FREE_C_HEAP_ARRAY(char, token_buf);
return pSID;
}
@ -920,7 +920,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (PrintMiscellaneous && Verbose) {
warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@ -933,7 +933,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (PrintMiscellaneous && Verbose) {
warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
if (((ACCESS_ALLOWED_ACE *)ace)->Header.AceFlags && INHERITED_ACE) {
@ -960,7 +960,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (PrintMiscellaneous && Verbose) {
warning("AddAce failure: lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
}
@ -976,7 +976,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
warning("AddAccessAllowedAce failure: lasterror = %d \n",
GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
}
@ -991,7 +991,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (PrintMiscellaneous && Verbose) {
warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
if (!AddAce(newACL, ACL_REVISION, MAXDWORD, ace,
@ -999,7 +999,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (PrintMiscellaneous && Verbose) {
warning("AddAce failure: lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
ace_index++;
@ -1012,7 +1012,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
warning("SetSecurityDescriptorDacl failure:"
" lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@ -1032,7 +1032,7 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
warning("SetSecurityDescriptorControl failure:"
" lasterror = %d \n", GetLastError());
}
FREE_C_HEAP_ARRAY(char, newACL, mtInternal);
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
}
@ -1149,7 +1149,7 @@ static LPSECURITY_ATTRIBUTES make_user_everybody_admin_security_attr(
// create a security attributes structure with access control
// entries as initialized above.
LPSECURITY_ATTRIBUTES lpSA = make_security_attr(aces, 3);
FREE_C_HEAP_ARRAY(char, aces[0].pSid, mtInternal);
FREE_C_HEAP_ARRAY(char, aces[0].pSid);
FreeSid(everybodySid);
FreeSid(administratorsSid);
return(lpSA);
@ -1464,15 +1464,15 @@ static char* mapping_create_shared(size_t size) {
assert(((size != 0) && (size % os::vm_page_size() == 0)),
"unexpected PerfMemry region size");
FREE_C_HEAP_ARRAY(char, user, mtInternal);
FREE_C_HEAP_ARRAY(char, user);
// create the shared memory resources
sharedmem_fileMapHandle =
create_sharedmem_resources(dirname, filename, objectname, size);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, objectname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename);
FREE_C_HEAP_ARRAY(char, objectname);
FREE_C_HEAP_ARRAY(char, dirname);
if (sharedmem_fileMapHandle == NULL) {
return NULL;
@ -1627,7 +1627,7 @@ static void open_file_mapping(const char* user, int vmid,
// store file, we also don't follow them when attaching
//
if (!is_directory_secure(dirname)) {
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname);
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"Process not found");
}
@ -1646,10 +1646,10 @@ static void open_file_mapping(const char* user, int vmid,
strcpy(robjectname, objectname);
// free the c heap resources that are no longer needed
if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
FREE_C_HEAP_ARRAY(char, filename, mtInternal);
FREE_C_HEAP_ARRAY(char, objectname, mtInternal);
if (luser != user) FREE_C_HEAP_ARRAY(char, luser);
FREE_C_HEAP_ARRAY(char, dirname);
FREE_C_HEAP_ARRAY(char, filename);
FREE_C_HEAP_ARRAY(char, objectname);
if (*sizep == 0) {
size = sharedmem_filesize(rfilename, CHECK);


@ -1025,7 +1025,7 @@ class CodeString: public CHeapObj<mtCode> {
~CodeString() {
assert(_next == NULL, "wrong interface for freeing list");
os::free((void*)_string, mtCode);
os::free((void*)_string);
}
bool is_comment() const { return _offset >= 0; }


@ -36,6 +36,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "runtime/deoptimization.hpp"
#include "utilities/growableArray.hpp"


@ -165,7 +165,7 @@ MetaIndex::MetaIndex(char** meta_package_names, int num_meta_package_names) {
MetaIndex::~MetaIndex() {
FREE_C_HEAP_ARRAY(char*, _meta_package_names, mtClass);
FREE_C_HEAP_ARRAY(char*, _meta_package_names);
}
@ -251,7 +251,7 @@ ClassPathZipEntry::~ClassPathZipEntry() {
if (ZipClose != NULL) {
(*ZipClose)(_zip);
}
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
FREE_C_HEAP_ARRAY(char, _zip_name);
}
u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {


@ -90,7 +90,7 @@ ImageFile::~ImageFile() {
close();
// Free up name.
FREE_C_HEAP_ARRAY(char, _name, mtClass);
FREE_C_HEAP_ARRAY(char, _name);
}
bool ImageFile::open() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -164,7 +164,7 @@ void LoaderConstraintTable::purge_loader_constraints() {
// Purge entry
*p = probe->next();
FREE_C_HEAP_ARRAY(oop, probe->loaders(), mtClass);
FREE_C_HEAP_ARRAY(oop, probe->loaders());
free_entry(probe);
} else {
#ifdef ASSERT
@ -340,7 +340,7 @@ void LoaderConstraintTable::ensure_loader_constraint_capacity(
ClassLoaderData** new_loaders = NEW_C_HEAP_ARRAY(ClassLoaderData*, n, mtClass);
memcpy(new_loaders, p->loaders(), sizeof(ClassLoaderData*) * p->num_loaders());
p->set_max_loaders(n);
FREE_C_HEAP_ARRAY(ClassLoaderData*, p->loaders(), mtClass);
FREE_C_HEAP_ARRAY(ClassLoaderData*, p->loaders());
p->set_loaders(new_loaders);
}
}
@ -422,7 +422,7 @@ void LoaderConstraintTable::merge_loader_constraints(
}
*pp2 = p2->next();
FREE_C_HEAP_ARRAY(oop, p2->loaders(), mtClass);
FREE_C_HEAP_ARRAY(oop, p2->loaders());
free_entry(p2);
return;
}


@ -89,7 +89,7 @@ public:
}
~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
FREE_C_HEAP_ARRAY(char, _buf_start);
}
}
int get_used_bytes() {


@ -168,7 +168,7 @@ void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* nam
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
}
_strings.free();


@ -1190,7 +1190,7 @@ void CodeCache::print_internals() {
}
}
FREE_C_HEAP_ARRAY(int, buckets, mtCode);
FREE_C_HEAP_ARRAY(int, buckets);
print_memory_overhead();
}


@ -58,10 +58,8 @@ CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id)
CompileLog::~CompileLog() {
delete _out; // Close fd in fileStream::~fileStream()
_out = NULL;
// Remove partial file after merging in CompileLog::finish_log_on_error
unlink(_file);
FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
FREE_C_HEAP_ARRAY(char, _identities);
FREE_C_HEAP_ARRAY(char, _file);
}


@ -52,21 +52,9 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
}
void ConcurrentMarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
Generation::Name yg_name =
UseParNewGC ? Generation::ParNew : Generation::DefNew;
_generations[0] = new GenerationSpec(yg_name, _initial_young_size,
_max_young_size);
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size);
if (_generations[0] == NULL || _generations[1] == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
_generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, _initial_old_size, _max_old_size);
}
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
@ -82,10 +70,5 @@ void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
// initialize the policy counters - 2 collectors, 3 generations
if (UseParNewGC) {
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
}
else {
_gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
}
_gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
}
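
Both hunks above delete the DefNew-with-CMS paths: the young generation spec is now unconditionally ParNew and the policy counters are always "ParNew:CMS", matching the collector changes below where _young_gen becomes a ParNewGeneration*. The allocation also moves from NEW_C_HEAP_ARRAY3 with AllocFailStrategy::RETURN_NULL plus a manual vm_exit to plain NEW_C_HEAP_ARRAY, whose default policy aborts the VM on failure by itself. A sketch of the two failure strategies (hypothetical shape, not the real macros):

#include <cstdio>
#include <cstdlib>

enum AllocFailStrategy { EXIT_OOM, RETURN_NULL };

// NEW_C_HEAP_ARRAY behaves like EXIT_OOM (never returns NULL, so no
// caller-side check); NEW_C_HEAP_ARRAY3(..., RETURN_NULL) like RETURN_NULL.
static void* heap_alloc(std::size_t bytes, AllocFailStrategy on_fail) {
  void* p = std::malloc(bytes);
  if (p == nullptr && on_fail == EXIT_OOM) {
    std::fprintf(stderr, "native allocation failed\n");
    std::exit(1);
  }
  return p;  // may be NULL only under RETURN_NULL
}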


@ -90,7 +90,8 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
CMSRescanMultiple),
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
CMSConcMarkMultiple),
_collector(NULL)
_collector(NULL),
_preconsumptionDirtyCardClosure(NULL)
{
assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
"FreeChunk is larger than expected");


@ -155,6 +155,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Used to keep track of limit of sweep for the space
HeapWord* _sweep_limit;
// Used to make the young collector update the mod union table
MemRegionClosure* _preconsumptionDirtyCardClosure;
// Support for compacting cms
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
@ -356,6 +359,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void initialize_sequential_subtasks_for_marking(int n_threads,
HeapWord* low = NULL);
virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
return _preconsumptionDirtyCardClosure;
}
void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
_preconsumptionDirtyCardClosure = cl;
}
// Space enquiries
size_t used() const;
size_t free() const;


@ -623,7 +623,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for parallelizing young gen rescan
GenCollectedHeap* gch = GenCollectedHeap::heap();
_young_gen = gch->prev_gen(_cmsGen);
assert(gch->prev_gen(_cmsGen)->kind() == Generation::ParNew, "CMS can only be used with ParNew");
_young_gen = (ParNewGeneration*)gch->prev_gen(_cmsGen);
if (gch->supports_inline_contig_alloc()) {
_top_addr = gch->top_addr();
_end_addr = gch->end_addr();
@ -650,15 +651,15 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|| _cursor == NULL) {
warning("Failed to allocate survivor plab/chunk array");
if (_survivor_plab_array != NULL) {
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
_survivor_plab_array = NULL;
}
if (_survivor_chunk_array != NULL) {
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
_survivor_chunk_array = NULL;
}
if (_cursor != NULL) {
FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
FREE_C_HEAP_ARRAY(size_t, _cursor);
_cursor = NULL;
}
} else {
@ -668,10 +669,10 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
if (vec == NULL) {
warning("Failed to allocate survivor plab array");
for (int j = i; j > 0; j--) {
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
}
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
_survivor_plab_array = NULL;
_survivor_chunk_array = NULL;
_survivor_chunk_capacity = 0;
@ -1201,14 +1202,6 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
return obj;
}
void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz) {
// CMS does not support promotion undo.
ShouldNotReachHere();
}
void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
@ -1641,13 +1634,12 @@ void CMSCollector::acquire_control_and_collect(bool full,
do_compaction_work(clear_all_soft_refs);
// Has the GC time limit been exceeded?
DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
size_t max_eden_size = young_gen->max_capacity() -
young_gen->to()->capacity() -
young_gen->from()->capacity();
size_t max_eden_size = _young_gen->max_capacity() -
_young_gen->to()->capacity() -
_young_gen->from()->capacity();
GCCause::Cause gc_cause = gch->gc_cause();
size_policy()->check_gc_overhead_limit(_young_gen->used(),
young_gen->eden()->used(),
_young_gen->eden()->used(),
_cmsGen->max_capacity(),
max_eden_size,
full,
@ -1768,10 +1760,9 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
}
void CMSCollector::print_eden_and_survivor_chunk_arrays() {
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
ContiguousSpace* eden_space = _young_gen->eden();
ContiguousSpace* from_space = _young_gen->from();
ContiguousSpace* to_space = _young_gen->to();
// Eden
if (_eden_chunk_array != NULL) {
gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
@ -2821,7 +2812,7 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
}
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// OneContigSpaceCardGeneration, which makes me wonder if we should move this
// TenuredGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
return CardGeneration::expand(bytes, expand_bytes);
@ -4094,10 +4085,6 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
}
if (clean_survivor) { // preclean the active survivor space(s)
assert(_young_gen->kind() == Generation::DefNew ||
_young_gen->kind() == Generation::ParNew,
"incorrect type for cast");
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
&_markBitMap, &_modUnionTable,
&_markStack, true /* precleaning phase */);
@ -4110,8 +4097,8 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
SurvivorSpacePrecleanClosure
sss_cl(this, _span, &_markBitMap, &_markStack,
&pam_cl, before_count, CMSYield);
dng->from()->object_iterate_careful(&sss_cl);
dng->to()->object_iterate_careful(&sss_cl);
_young_gen->from()->object_iterate_careful(&sss_cl);
_young_gen->to()->object_iterate_careful(&sss_cl);
}
MarkRefsIntoAndScanClosure
mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
@ -4696,10 +4683,10 @@ class RemarkKlassClosure : public KlassClosure {
};
void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
ParNewGeneration* young_gen = _collector->_young_gen;
ContiguousSpace* eden_space = young_gen->eden();
ContiguousSpace* from_space = young_gen->from();
ContiguousSpace* to_space = young_gen->to();
HeapWord** eca = _collector->_eden_chunk_array;
size_t ect = _collector->_eden_chunk_index;
@ -5168,11 +5155,10 @@ void
CMSCollector::
initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
assert(n_threads > 0, "Unexpected n_threads argument");
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
// Eden space
if (!dng->eden()->is_empty()) {
SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
if (!_young_gen->eden()->is_empty()) {
SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
assert(!pst->valid(), "Clobbering existing data?");
// Each valid entry in [0, _eden_chunk_index) represents a task.
size_t n_tasks = _eden_chunk_index + 1;
@ -5185,14 +5171,14 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
// Merge the survivor plab arrays into _survivor_chunk_array
if (_survivor_plab_array != NULL) {
merge_survivor_plab_arrays(dng->from(), n_threads);
merge_survivor_plab_arrays(_young_gen->from(), n_threads);
} else {
assert(_survivor_chunk_index == 0, "Error");
}
// To space
{
SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
assert(!pst->valid(), "Clobbering existing data?");
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
@ -5203,7 +5189,7 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
// From space
{
SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
assert(!pst->valid(), "Clobbering existing data?");
size_t n_tasks = _survivor_chunk_index + 1;
assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
@ -5945,7 +5931,6 @@ void CMSCollector::reset(bool concurrent) {
}
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
TraceCollectorStats tcs(counters());
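
A pattern that repeats through the rest of this commit: FREE_C_HEAP_ARRAY drops its trailing memory-type argument (mtGC, mtInternal, ...). A minimal sketch of the idea, assuming the tag is stored next to the allocation so the free path can recover it on its own (the names below are invented for illustration, not HotSpot's, and error handling is elided):

#include <cstdlib>

enum MemTag { mtGC, mtInternal };

struct Header { MemTag tag; };

void* tagged_alloc(std::size_t bytes, MemTag tag) {
  Header* h = static_cast<Header*>(std::malloc(sizeof(Header) + bytes));
  h->tag = tag;                  // tag recorded once, at allocation time
  return h + 1;                  // caller sees only the payload
}

void tagged_free(void* p) {      // note: no tag parameter anymore
  if (p != nullptr) std::free(static_cast<Header*>(p) - 1);
}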

View File

@ -721,7 +721,8 @@ class CMSCollector: public CHeapObj<mtGC> {
private:
// Support for parallelizing young gen rescan in CMS remark phase
Generation* _young_gen; // the younger gen
ParNewGeneration* _young_gen; // the younger gen
HeapWord** _top_addr; // ... Top of Eden
HeapWord** _end_addr; // ... End of Eden
Mutex* _eden_chunk_lock;
@ -1151,9 +1152,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Overrides for parallel promotion.
virtual oop par_promote(int thread_num,
oop obj, markOop m, size_t word_sz);
// This one should not be called for CMS.
virtual void par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz);
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
@ -1256,8 +1254,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual const char* short_name() const { return "CMS"; }
void print() const;
void printOccupancy(const char* s);
bool must_be_youngest() const { return false; }
bool must_be_oldest() const { return true; }
// Resize the generation after a compacting GC. The
// generation can be treated as a contiguous space

View File

@ -29,8 +29,9 @@
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
inline void CMSBitMap::clear_all() {
assert_locked();
@ -257,11 +258,11 @@ inline bool CMSCollector::should_abort_preclean() const {
}
inline size_t CMSCollector::get_eden_used() const {
return _young_gen->as_DefNewGeneration()->eden()->used();
return _young_gen->eden()->used();
}
inline size_t CMSCollector::get_eden_capacity() const {
return _young_gen->as_DefNewGeneration()->eden()->capacity();
return _young_gen->eden()->capacity();
}
inline bool CMSStats::valid() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,7 @@ ConcurrentG1Refine::~ConcurrentG1Refine() {
for (uint i = 0; i < _n_threads; i++) {
delete _threads[i];
}
FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
}
}

View File

@ -180,9 +180,32 @@ class ClearBitmapHRClosure : public HeapRegionClosure {
}
};
class ParClearNextMarkBitmapTask : public AbstractGangTask {
ClearBitmapHRClosure* _cl;
HeapRegionClaimer _hrclaimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
_cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
void work(uint worker_id) {
if (_suspendible) {
SuspendibleThreadSet::join();
}
G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
if (_suspendible) {
SuspendibleThreadSet::leave();
}
}
};
void CMBitMap::clearAll() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
G1CollectedHeap::heap()->heap_region_iterate(&cl);
uint n_workers = g1h->workers()->active_workers();
ParClearNextMarkBitmapTask task(&cl, n_workers, false);
g1h->workers()->run_task(&task);
guarantee(cl.complete(), "Must have completed iteration.");
return;
}
@ -861,7 +884,8 @@ void ConcurrentMark::clearNextBitmap() {
guarantee(!g1h->mark_in_progress(), "invariant");
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
g1h->heap_region_iterate(&cl);
ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
_parallel_workers->run_task(&task);
// Clear the liveness counting data. If the marking has been aborted, the abort()
// call already did that.
@ -2099,6 +2123,7 @@ void ConcurrentMark::cleanup() {
// We reclaimed old regions so we should calculate the sizes to make
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->allocation_context_stats().update_after_mark();
g1h->trace_heap_after_concurrent_cycle();
}
@ -3219,7 +3244,6 @@ void ConcurrentMark::aggregate_count_data() {
_g1h->set_par_threads(n_workers);
_g1h->workers()->run_task(&g1_par_agg_task);
_g1h->set_par_threads(0);
_g1h->allocation_context_stats().update_at_remark();
}
// Clear the per-worker arrays used to store the per-region counting data
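
The hunks above replace the serial heap_region_iterate() walk in CMBitMap::clearAll() with a gang task, and move the suspendible-thread-set join from the caller into each worker. A rough sketch of the claim-and-clear pattern, with a bare atomic counter standing in for HeapRegionClaimer:

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Each worker claims whole regions off a shared counter and clears them.
void par_clear(std::vector<std::vector<bool> >& region_bits, unsigned n_workers) {
  std::atomic<std::size_t> next(0);
  auto work = [&]() {
    std::size_t i;
    while ((i = next.fetch_add(1)) < region_bits.size()) {  // claim one region
      std::fill(region_bits[i].begin(), region_bits[i].end(), false);
    }
  };
  std::vector<std::thread> pool;
  for (unsigned w = 0; w < n_workers; ++w) pool.emplace_back(work);
  for (std::thread& t : pool) t.join();
}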

View File

@ -280,7 +280,6 @@ void ConcurrentMarkThread::run() {
// We may have aborted just before the remark. Do not bother clearing the
// bitmap then, as it has been done during mark abort.
if (!cm()->has_aborted()) {
SuspendibleThreadSetJoiner sts;
_cm->clearNextBitmap();
} else {
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");

View File

@ -45,7 +45,7 @@ class AllocationContextStats: public StackObj {
public:
inline void clear() { }
inline void update(bool full_gc) { }
inline void update_at_remark() { }
inline void update_after_mark() { }
inline bool available() { return false; }
};

View File

@ -59,7 +59,7 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
!(retained_region->top() == retained_region->end()) &&
!retained_region->is_empty() &&
!retained_region->is_humongous()) {
retained_region->record_top_and_timestamp();
retained_region->record_timestamp();
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
@ -94,6 +94,9 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
if (_retained_old_gc_alloc_region != NULL) {
_retained_old_gc_alloc_region->record_retained_region();
}
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);

View File

@ -111,13 +111,13 @@ CodeRootSetTable::~CodeRootSetTable() {
// read next before freeing.
e = e->next();
unlink_entry(to_remove);
FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
FREE_C_HEAP_ARRAY(char, to_remove);
}
}
assert(number_of_entries() == 0, "should have removed all entries");
free_buckets();
for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
FREE_C_HEAP_ARRAY(char, e, mtGC);
FREE_C_HEAP_ARRAY(char, e);
}
}

View File

@ -1222,7 +1222,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// Timing
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
{
@ -2258,6 +2257,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_g1_humongous_allocation: return true;
case GCCause::_update_allocation_context_stats_inc: return true;
case GCCause::_wb_conc_mark: return true;
default: return false;
}
}
@ -2552,8 +2552,9 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
void
G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
uint worker_id,
HeapRegionClaimer *hrclaimer) const {
_hrm.par_iterate(cl, worker_id, hrclaimer);
HeapRegionClaimer *hrclaimer,
bool concurrent) const {
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}
// Clear the cached CSet starting regions and (more importantly)
@ -3561,7 +3562,7 @@ G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
void
G1CollectedHeap::cleanup_surviving_young_words() {
guarantee( _surviving_young_words != NULL, "pre-condition" );
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
_surviving_young_words = NULL;
}
@ -6530,7 +6531,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_top_and_timestamp();
new_alloc_region->record_timestamp();
if (survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);

View File

@ -1380,10 +1380,13 @@ public:
// in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
// to each of the regions, by attempting to claim the region using the
// HeapRegionClaimer and, if successful, applying the closure to the claimed
// region.
// region. The concurrent argument should be set to true if iteration is
// performed concurrently, during which no assumptions are made for consistent
// attributes of the heap regions (as they might be modified while iterating).
void heap_region_par_iterate(HeapRegionClosure* cl,
uint worker_id,
HeapRegionClaimer* hrclaimer) const;
HeapRegionClaimer* hrclaimer,
bool concurrent = false) const;
// Clear the cached cset start regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.

View File

@ -1425,6 +1425,18 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
#endif // PRODUCT
}
bool G1CollectorPolicy::is_young_list_full() {
uint young_list_length = _g1->young_list()->length();
uint young_list_target_length = _young_list_target_length;
return young_list_length >= young_list_target_length;
}
bool G1CollectorPolicy::can_expand_young_list() {
uint young_list_length = _g1->young_list()->length();
uint young_list_max_length = _young_list_max_length;
return young_list_length < young_list_max_length;
}
uint G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:

View File

@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"
@ -807,7 +808,7 @@ public:
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
size_t expansion_amount();
virtual size_t expansion_amount();
// Print tracing information.
void print_tracing_info() const;
@ -826,17 +827,9 @@ public:
size_t young_list_target_length() const { return _young_list_target_length; }
bool is_young_list_full() {
uint young_list_length = _g1->young_list()->length();
uint young_list_target_length = _young_list_target_length;
return young_list_length >= young_list_target_length;
}
bool is_young_list_full();
bool can_expand_young_list() {
uint young_list_length = _g1->young_list()->length();
uint young_list_max_length = _young_list_max_length;
return young_list_length < young_list_max_length;
}
bool can_expand_young_list();
uint young_list_max_length() {
return _young_list_max_length;
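
The two predicates move out of the header and expansion_amount() becomes virtual; together with the new G1CollectorPolicyExt header further down, this reads like preparation for overriding policy behavior in an extension class (that rationale is an inference, not stated in the diff). The shape in miniature, with invented names:

#include <cstddef>

class Policy {
public:
  virtual ~Policy() {}
  // Out of line and virtual: the body lives in the .cpp, so a subclass
  // can replace it without the header baking in the old behavior.
  virtual std::size_t expansion_amount();
};

std::size_t Policy::expansion_amount() { return 1024; }

class PolicyExt : public Policy {
public:
  std::size_t expansion_amount() override { return 0; }  // extension hook
};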

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
class G1CollectorPolicyExt : public G1CollectorPolicy { };
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP

View File

@ -53,7 +53,7 @@ class WorkerDataArray : public CHeapObj<mtGC> {
}
~WorkerDataArray() {
FREE_C_HEAP_ARRAY(T, _data, mtGC);
FREE_C_HEAP_ARRAY(T, _data);
}
void set(uint worker_i, T value) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
G1HotCardCache::~G1HotCardCache() {
if (default_use_cache()) {
assert(_hot_cache != NULL, "Logic");
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
}
}

View File

@ -65,7 +65,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
G1ParScanThreadState::~G1ParScanThreadState() {
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}
void

View File

@ -94,7 +94,7 @@ G1RemSet::~G1RemSet() {
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
}
FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
}
class ScanRSClosure : public HeapRegionClosure {
@ -140,11 +140,9 @@ public:
// Set the "from" region in the closure.
_oc->set_region(r);
HeapWord* card_start = _bot_shared->address_for_index(index);
HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
Space *sp = SharedHeap::heap()->space_containing(card_start);
MemRegion sm_region = sp->used_region_at_save_marks();
MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
MemRegion mr = pre_gc_allocated.intersection(card_region);
if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
// We mark the card as "claimed" lazily (so races are possible
// but they're benign), which reduces the number of duplicate
@ -353,7 +351,7 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
for (uint i = 0; i < n_workers(); ++i) {
_total_cards_scanned += _cards_scanned[i];
}
FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC);
FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
_cards_scanned = NULL;
// Cleanup after copy
_g1->set_refine_cte_cl_concurrency(true);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@ private:
void free_and_null() {
if (_rs_threads_vtimes) {
FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes, mtGC);
FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes);
_rs_threads_vtimes = NULL;
_num_vtimes = 0;
}

View File

@ -187,7 +187,7 @@ G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) :
}
G1StringDedupTable::~G1StringDedupTable() {
FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets, mtGC);
FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets);
}
void G1StringDedupTable::create() {

View File

@ -326,7 +326,7 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
record_top_and_timestamp();
record_timestamp();
assert(mr.end() == orig_end(),
err_msg("Given region end address " PTR_FORMAT " should match exactly "
@ -416,9 +416,9 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
// If we're within a stop-world GC, then we might look at a card in a
// GC alloc region that extends onto a GC LAB, which may not be
// parseable. Stop such at the "saved_mark" of the region.
// parseable. Stop such at the "scan_top" of the region.
if (g1h->is_gc_active()) {
mr = mr.intersection(used_region_at_save_marks());
mr = mr.intersection(MemRegion(bottom(), scan_top()));
} else {
mr = mr.intersection(used_region());
}
@ -969,7 +969,7 @@ void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
void G1OffsetTableContigSpace::clear(bool mangle_space) {
set_top(bottom());
set_saved_mark_word(bottom());
_scan_top = bottom();
CompactibleSpace::clear(mangle_space);
reset_bot();
}
@ -1001,41 +1001,42 @@ HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
return _offsets.threshold();
}
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
HeapWord* G1OffsetTableContigSpace::scan_top() const {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
HeapWord* local_top = top();
OrderAccess::loadload();
if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
const unsigned local_time_stamp = _gc_time_stamp;
assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
if (local_time_stamp < g1h->get_gc_time_stamp()) {
return local_top;
} else {
return Space::saved_mark_word();
return _scan_top;
}
}
void G1OffsetTableContigSpace::record_top_and_timestamp() {
void G1OffsetTableContigSpace::record_timestamp() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
if (_gc_time_stamp < curr_gc_time_stamp) {
// The order of these is important, as another thread might be
// about to start scanning this region. If it does so after
// set_saved_mark and before _gc_time_stamp = ..., then the latter
// will be false, and it will pick up top() as the high water mark
// of region. If it does so after _gc_time_stamp = ..., then it
// will pick up the right saved_mark_word() as the high water mark
// of the region. Either way, the behavior will be correct.
Space::set_saved_mark_word(top());
OrderAccess::storestore();
// Setting the time stamp here tells concurrent readers to look at
// scan_top to know the maximum allowed address to look at.
// scan_top should be bottom for all regions except for the
// retained old alloc region which should have scan_top == top
HeapWord* st = _scan_top;
guarantee(st == _bottom || st == _top, "invariant");
_gc_time_stamp = curr_gc_time_stamp;
// No need to do another barrier to flush the writes above. If
// this is called in parallel with other threads trying to
// allocate into the region, the caller should call this while
// holding a lock and when the lock is released the writes will be
// flushed.
}
}
void G1OffsetTableContigSpace::record_retained_region() {
// scan_top is the maximum address where it's safe for the next gc to
// scan this region.
_scan_top = top();
}
void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
object_iterate(blk);
}
@ -1063,6 +1064,8 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
CompactibleSpace::initialize(mr, clear_space, mangle_space);
_top = bottom();
_scan_top = bottom();
set_saved_mark_word(NULL);
reset_bot();
}
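
Condensing the protocol implemented above: the writer leaves scan_top at bottom (or at top, via record_retained_region(), for a retained alloc region) before publishing the new time stamp, and the reader reads top, issues a load barrier, then checks the stamp. A sketch under those assumptions, using C++11 atomics in place of OrderAccess and eliding the data-race fine print:

#include <atomic>

struct Region {
  char* top;                          // advanced by allocating workers
  char* scan_top;                     // bottom, or top for a retained region
  std::atomic<unsigned> gc_time_stamp{0};
};

char* safe_scan_limit(const Region& r, unsigned current_stamp) {
  char* local_top = r.top;                               // read top first
  std::atomic_thread_fence(std::memory_order_acquire);   // ~ OrderAccess::loadload()
  unsigned stamp = r.gc_time_stamp.load(std::memory_order_relaxed);
  if (stamp < current_stamp) {
    return local_top;                 // not a GC alloc region this pause
  }
  return r.scan_top;                  // only what the previous GC vetted
}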

View File

@ -101,28 +101,25 @@ public:
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.
// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// well, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// The idea behind time stamps is the following. We want to keep track of
// the highest address where it's safe to scan objects for each region.
// This is only relevant for current GC alloc regions so we keep a time stamp
// per region to determine if the region has been allocated during the current
// GC or not. If the time stamp is current we report a scan_top value which
// was saved at the end of the previous GC for retained alloc regions and which is
// equal to the bottom for all other regions.
// There is a race between card scanners and allocating gc workers where we must ensure
// that card scanners do not read the memory allocated by the gc workers.
// In order to enforce that, we must not return a value of _top which is more recent than the
// time stamp. This is due to the fact that a region may become a gc alloc region at
// some point after we've read the timestamp value as being < the current time stamp.
// The time stamps are re-initialized to zero at cleanup and at Full GCs.
// The current scheme that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
class G1OffsetTableContigSpace: public CompactibleSpace {
friend class VMStructs;
HeapWord* _top;
HeapWord* volatile _scan_top;
protected:
G1BlockOffsetArrayContigSpace _offsets;
Mutex _par_alloc_lock;
@ -166,10 +163,11 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
void set_bottom(HeapWord* value);
void set_end(HeapWord* value);
virtual HeapWord* saved_mark_word() const;
void record_top_and_timestamp();
HeapWord* scan_top() const;
void record_timestamp();
void reset_gc_time_stamp() { _gc_time_stamp = 0; }
unsigned get_gc_time_stamp() { return _gc_time_stamp; }
void record_retained_region();
// See the comment above in the declaration of _pre_dummy_top for an
// explanation of what it is.
@ -191,6 +189,8 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
virtual HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size);
HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
// MarkSweep support phase3
virtual HeapWord* initialize_threshold();
virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

View File

@ -260,7 +260,7 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
return num_regions;
}
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
const uint start_index = hrclaimer->start_region_for_worker(worker_id);
// Every worker will actually look at all regions, skipping over regions that
@ -279,7 +279,11 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
// However, if the iteration is specified as concurrent, the values for
// is_starts_humongous and is_continues_humongous cannot be trusted,
// and we should just blindly iterate over regions regardless of their
// humongous status.
if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
continue;
}
// OK, try to claim it
@ -287,7 +291,9 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
continue;
}
// Success!
if (r->is_starts_humongous()) {
// As mentioned above, special treatment of humongous regions can only be
// done if we are iterating non-concurrently.
if (!concurrent && r->is_starts_humongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
@ -449,7 +455,7 @@ HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
HeapRegionClaimer::~HeapRegionClaimer() {
if (_claims != NULL) {
FREE_C_HEAP_ARRAY(uint, _claims, mtGC);
FREE_C_HEAP_ARRAY(uint, _claims);
}
}
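
The claimer freed above backs the claim step in par_iterate(): every worker walks the full index range, but a CAS per region guarantees each region is processed exactly once. A rough model, assuming one claim word per region:

#include <atomic>
#include <cstddef>
#include <vector>

class Claimer {
  enum { Unclaimed = 0, Claimed = 1 };
  std::vector<std::atomic<unsigned> > _claims;   // one word per region
public:
  explicit Claimer(std::size_t n_regions) : _claims(n_regions) {}
  // Exactly one caller per index ever sees true.
  bool claim(std::size_t index) {
    unsigned expected = Unclaimed;
    return _claims[index].compare_exchange_strong(expected, Claimed);
  }
};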

View File

@ -222,7 +222,7 @@ public:
// terminating the iteration early if doHeapRegion() returns true.
void iterate(HeapRegionClosure* blk) const;
void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.

View File

@ -449,5 +449,5 @@ void FreeRegionList_test() {
bot_storage->uncommit_regions(0, num_regions_in_test);
delete bot_storage;
FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
FREE_C_HEAP_ARRAY(HeapWord, bot_data);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,7 +129,7 @@ void PtrQueueSet::reduce_free_list() {
assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
void* b = BufferNode::make_block_from_node(_buf_free_list);
_buf_free_list = _buf_free_list->next();
FREE_C_HEAP_ARRAY(char, b, mtGC);
FREE_C_HEAP_ARRAY(char, b);
_buf_free_list_sz --;
n--;
}

View File

@ -154,11 +154,11 @@ RSHashTable::RSHashTable(size_t capacity) :
RSHashTable::~RSHashTable() {
if (_entries != NULL) {
FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries, mtGC);
FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries);
_entries = NULL;
}
if (_buckets != NULL) {
FREE_C_HEAP_ARRAY(int, _buckets, mtGC);
FREE_C_HEAP_ARRAY(int, _buckets);
_buckets = NULL;
}
}

View File

@ -106,13 +106,13 @@ SurvRateGroup::stop_adding_regions() {
_stats_arrays_length = _region_num;
if (old_surv_rate != NULL) {
FREE_C_HEAP_ARRAY(double, old_surv_rate, mtGC);
FREE_C_HEAP_ARRAY(double, old_surv_rate);
}
if (old_accum_surv_rate_pred != NULL) {
FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred, mtGC);
FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred);
}
if (old_surv_rate_pred != NULL) {
FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred, mtGC);
FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred);
}
}

View File

@ -92,12 +92,8 @@ bool VM_G1IncCollectionPause::doit_prologue() {
void VM_G1IncCollectionPause::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(!_should_initiate_conc_mark ||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
_gc_cause == GCCause::_g1_humongous_allocation ||
_gc_cause == GCCause::_update_allocation_context_stats_inc),
"only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
"only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
@ -230,7 +226,6 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
}
void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
SharedHeap* sh = SharedHeap::heap();

View File

@ -462,7 +462,7 @@ get_LNC_array_for_space(Space* sp,
if (_lowest_non_clean[i] != NULL) {
assert(n_chunks != _lowest_non_clean_chunk_size[i],
"logical consequence");
FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC);
FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
_lowest_non_clean[i] = NULL;
}
// Now allocate a new one if necessary.

View File

@ -39,7 +39,6 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
@ -884,8 +883,6 @@ void EvacuateFollowersClosureGeneral::do_void() {
// A Generation that does parallel young-gen collection.
bool ParNewGeneration::_avoid_promotion_undo = false;
void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@ -934,10 +931,6 @@ void ParNewGeneration::collect(bool full,
assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen.");
_next_gen = gch->next_gen(this);
// Do we have to avoid promotion_undo?
if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
set_avoid_promotion_undo(true);
}
// If the next generation is too full to accommodate worst-case promotion
// from this generation, pass on collection; let the next generation
@ -999,6 +992,11 @@ void ParNewGeneration::collect(bool full,
thread_state_set.reset(0 /* Bad value in debug if not reset */,
promotion_failed());
// Trace and reset failed promotion info.
if (promotion_failed()) {
thread_state_set.trace_promotion_failed(gc_tracer);
}
// Process (weak) reference objects found during scavenge.
ReferenceProcessor* rp = ref_processor();
IsAliveClosure is_alive(this);
@ -1136,7 +1134,7 @@ oop ParNewGeneration::real_forwardee_slow(oop obj) {
#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
return
(_avoid_promotion_undo && p == ClaimedForwardPtr)
(p == ClaimedForwardPtr)
|| Universe::heap()->is_in_reserved(p);
}
#endif
@ -1157,7 +1155,7 @@ void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.
oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
oop ParNewGeneration::copy_to_survivor_space(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
@ -1277,131 +1275,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
return forward_ptr;
}
// Multiple GC threads may try to promote the same object. If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer. The other threads have to undo their copy.
oop ParNewGeneration::copy_to_survivor_space_with_undo(
ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
// In the sequential version, this assert also says that the object is
// not forwarded. That might not be the case here. It is the case that
// the caller observed it to be not forwarded at some time in the past.
assert(is_in_reserved(old), "shouldn't be scavenging this oop");
// The sequential code read "old->age()" below. That doesn't work here,
// since the age is in the mark word, and that might be overwritten with
// a forwarding pointer by a parallel thread. So we must save the mark
// word here, install it in a local oopDesc, and then analyze it.
oopDesc dummyOld;
dummyOld.set_mark(m);
assert(!dummyOld.is_forwarded(),
"should not be called with forwarding pointer mark word.");
bool failed_to_promote = false;
oop new_obj = NULL;
oop forward_ptr;
// Try allocating obj in to-space (unless too old)
if (dummyOld.age() < tenuring_threshold()) {
new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
if (new_obj == NULL) {
set_survivor_overflow(true);
}
}
if (new_obj == NULL) {
// Either to-space is full or we decided to promote
// try allocating obj tenured
new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
old, m, sz);
if (new_obj == NULL) {
// promotion failed, forward to self
forward_ptr = old->forward_to_atomic(old);
new_obj = old;
if (forward_ptr != NULL) {
return forward_ptr; // someone else succeeded
}
_promotion_failed = true;
failed_to_promote = true;
preserve_mark_if_necessary(old, m);
par_scan_state->register_promotion_failure(sz);
}
} else {
// Is in to-space; do copying ourselves.
Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
// Restore the mark word copied above.
new_obj->set_mark(m);
// Increment age if new_obj still in new generation
new_obj->incr_age();
par_scan_state->age_table()->add(new_obj, sz);
}
assert(new_obj != NULL, "just checking");
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect
// information.
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
}
#endif
// Now attempt to install the forwarding pointer (atomically).
// We have to copy the mark word before overwriting with forwarding
// ptr, so we can restore it below in the copy.
if (!failed_to_promote) {
forward_ptr = old->forward_to_atomic(new_obj);
}
if (forward_ptr == NULL) {
oop obj_to_push = new_obj;
if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
// Length field used as index of next element to be scanned.
// Real length can be obtained from real_forwardee()
arrayOop(old)->set_length(0);
obj_to_push = old;
assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
"push forwarded object");
}
// Push it on one of the queues of to-be-scanned objects.
bool simulate_overflow = false;
NOT_PRODUCT(
if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
// simulate a stack overflow
simulate_overflow = true;
}
)
if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
// Add stats for overflow pushes.
push_on_overflow_list(old, par_scan_state);
TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
}
return new_obj;
}
// Oops. Someone beat us to it. Undo the allocation. Where did we
// allocate it?
if (is_in_reserved(new_obj)) {
// Must be in to_space.
assert(to()->is_in_reserved(new_obj), "Checking");
par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
} else {
assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
_next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
(HeapWord*)new_obj, sz);
}
return forward_ptr;
}
#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
@ -1609,7 +1482,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
// This can become a scaling bottleneck when there is work queue overflow coincident
// with promotion failure.
oopDesc* f = cur;
FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
FREE_C_HEAP_ARRAY(oopDesc, f);
} else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
obj_to_push = cur;
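
The deleted copy_to_survivor_space_with_undo() existed because several workers could copy the same object and the losers had to undo their allocation; with ParNew now the only young collector paired with CMS, the surviving path installs the forwarding pointer with a single CAS and a loser simply adopts the winner's copy. A toy model of that race (the tag bits and names are invented):

#include <atomic>
#include <cstdint>

struct ObjHeader { std::atomic<std::uintptr_t> mark; };

// Returns nullptr if we installed new_copy, else the copy that won.
void* forward_to_atomic(ObjHeader* obj, std::uintptr_t old_mark, void* new_copy) {
  std::uintptr_t fwd = reinterpret_cast<std::uintptr_t>(new_copy) | 0x3;  // 0x3 ~ "forwarded" tag
  std::uintptr_t expected = old_mark;
  if (obj->mark.compare_exchange_strong(expected, fwd)) {
    return nullptr;                                      // we won the race
  }
  return reinterpret_cast<void*>(expected & ~std::uintptr_t(0x3));  // adopt winner's copy
}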

View File

@ -329,9 +329,6 @@ class ParNewGeneration: public DefNewGeneration {
oop _overflow_list;
NOT_PRODUCT(ssize_t _num_par_pushes;)
// If true, older generation does not support promotion undo, so avoid.
static bool _avoid_promotion_undo;
// This closure is used by the reference processor to filter out
// references to a live referent.
DefNewGeneration::IsAliveClosure _is_alive_closure;
@ -349,9 +346,6 @@ class ParNewGeneration: public DefNewGeneration {
bool _survivor_overflow;
bool avoid_promotion_undo() { return _avoid_promotion_undo; }
void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
bool survivor_overflow() { return _survivor_overflow; }
void set_survivor_overflow(bool v) { _survivor_overflow = v; }
@ -372,7 +366,6 @@ class ParNewGeneration: public DefNewGeneration {
// override
virtual bool refs_discovery_is_mt() const {
assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
return ParallelGCThreads > 1;
}
@ -386,20 +379,7 @@ class ParNewGeneration: public DefNewGeneration {
// "obj" is the object to be copied, "m" is a recent value of its mark
// that must not contain a forwarding pointer (though one might be
// inserted in "obj"s mark word by a parallel thread).
inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m) {
if (_avoid_promotion_undo) {
return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
obj, obj_sz, m);
}
return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
}
oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);
oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
oop obj, size_t obj_sz, markOop m);
// in support of testing overflow code

View File

@ -28,6 +28,7 @@
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genCollectedHeap.hpp"
template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
assert (!oopDesc::is_null(*p), "null weak reference?");

View File

@ -429,7 +429,7 @@ void GCTaskManager::initialize() {
}
tty->cr();
}
FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC);
FREE_C_HEAP_ARRAY(uint, processor_assignment);
}
reset_busy_workers();
set_unblocked();
@ -458,11 +458,11 @@ GCTaskManager::~GCTaskManager() {
GCTaskThread::destroy(thread(i));
set_thread(i, NULL);
}
FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC);
FREE_C_HEAP_ARRAY(GCTaskThread*, _thread);
_thread = NULL;
}
if (_resource_flag != NULL) {
FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC);
FREE_C_HEAP_ARRAY(bool, _resource_flag);
_resource_flag = NULL;
}
if (queue() != NULL) {

View File

@ -58,7 +58,7 @@ GCTaskThread::GCTaskThread(GCTaskManager* manager,
GCTaskThread::~GCTaskThread() {
if (_time_stamps != NULL) {
FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps, mtGC);
FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps);
}
}

View File

@ -168,7 +168,6 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
{
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
TraceCollectorStats tcs(counters());

View File

@ -2055,7 +2055,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gc_task_manager()->task_idle_workers();
heap->set_par_threads(gc_task_manager()->active_workers());
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());

View File

@ -330,7 +330,6 @@ bool PSScavenge::invoke_no_policy() {
ResourceMark rm;
HandleMark hm;
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
TraceCollectorStats tcs(counters());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ class CSpaceCounters: public CHeapObj<mtGC> {
ContiguousSpace* s, GenerationCounters* gc);
~CSpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtInternal);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
virtual inline void update_capacity() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,7 @@ class CollectorCounters: public CHeapObj<mtGC> {
CollectorCounters(const char* name, int ordinal);
~CollectorCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
inline PerfCounter* invocation_counter() const { return _invocations; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ class GSpaceCounters: public CHeapObj<mtGC> {
GenerationCounters* gc, bool sampled=true);
~GSpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
inline void update_capacity() {

View File

@ -49,10 +49,8 @@ GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* t
}
if (_doit) {
if (PrintGCTimeStamps) {
gclog_or_tty->stamp();
gclog_or_tty->print(": ");
}
gclog_or_tty->date_stamp(PrintGCDateStamps);
gclog_or_tty->stamp(PrintGCTimeStamps);
if (PrintGCID) {
gclog_or_tty->print("#%u: ", gc_id.id());
}
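
The stamping change above folds the caller's if (PrintGCTimeStamps) block into the stamp(bool) call itself (and adds a date stamp alongside). The guard-into-callee move in miniature:

#include <cstdio>
#include <ctime>

// The caller-side if-block collapses into the callee.
void stamp(bool enabled) {
  if (!enabled) return;
  std::printf("%ld: ", static_cast<long>(std::time(nullptr)));
}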

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,7 +69,7 @@ private:
size_t min_capacity, size_t max_capacity, VirtualSpace* v);
~GenerationCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
virtual void update_all();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ class HSpaceCounters: public CHeapObj<mtGC> {
size_t initial_capacity, GenerationCounters* gc);
~HSpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
inline void update_capacity(size_t v) {

View File

@ -276,7 +276,7 @@ bool MutableNUMASpace::update_layout(bool force) {
}
}
FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);
FREE_C_HEAP_ARRAY(int, lgrp_ids);
if (changed) {
for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {

View File

@ -142,216 +142,3 @@ void ParGCAllocBuffer::print() {
"FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT
const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
(size_t)Generation::GenGrain);
ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
BlockOffsetSharedArray* bsa) :
ParGCAllocBuffer(word_sz),
_bsa(bsa),
_bt(bsa, MemRegion(_bottom, _hard_end)),
_true_end(_hard_end)
{}
// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
bool contig) {
CollectedHeap::fill_with_object(mr);
if (contig) {
_bt.alloc_block(mr.start(), mr.end());
} else {
_bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
}
}
HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
HeapWord* res = NULL;
if (_true_end > _hard_end) {
assert((HeapWord*)align_size_down(intptr_t(_hard_end),
ChunkSizeInBytes) == _hard_end,
"or else _true_end should be equal to _hard_end");
assert(_retained, "or else _true_end should be equal to _hard_end");
assert(_retained_filler.end() <= _top, "INVARIANT");
CollectedHeap::fill_with_object(_retained_filler);
if (_top < _hard_end) {
fill_region_with_block(MemRegion(_top, _hard_end), true);
}
HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
_retained_filler = MemRegion(_hard_end, FillerHeaderSize);
_bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
_top = _retained_filler.end();
_hard_end = next_hard_end;
_end = _hard_end - AlignmentReserve;
res = ParGCAllocBuffer::allocate(word_sz);
if (res != NULL) {
_bt.alloc_block(res, word_sz);
}
}
return res;
}
void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
ParGCAllocBuffer::undo_allocation(obj, word_sz);
// This may back us up beyond the previous threshold, so reset.
_bt.set_region(MemRegion(_top, _hard_end));
_bt.initialize_threshold();
}
void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
assert(!retain || end_of_gc, "Can only retain at GC end.");
if (_retained) {
// We're about to make the retained_filler into a block.
_bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
_retained_filler.end());
}
// Reset _hard_end to _true_end (and update _end)
if (retain && _hard_end != NULL) {
assert(_hard_end <= _true_end, "Invariant.");
_hard_end = _true_end;
_end = MAX2(_top, _hard_end - AlignmentReserve);
assert(_end <= _hard_end, "Invariant.");
}
_true_end = _hard_end;
HeapWord* pre_top = _top;
ParGCAllocBuffer::retire(end_of_gc, retain);
// Now any old _retained_filler is cut back to size, the free part is
// filled with a filler object, and top is past the header of that
// object.
if (retain && _top < _end) {
assert(end_of_gc && retain, "Or else retain should be false.");
// If the lab does not start on a card boundary, we don't want to
// allocate onto that card, since that might lead to concurrent
// allocation and card scanning, which we don't support. So we fill
// the first card with a garbage object.
size_t first_card_index = _bsa->index_for(pre_top);
HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
if (first_card_start < pre_top) {
HeapWord* second_card_start =
_bsa->inc_by_region_size(first_card_start);
// Ensure enough room to fill with the smallest block
second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
// If the end is already in the first card, don't go beyond it!
// Or if the remainder is too small for a filler object, gobble it up.
if (_hard_end < second_card_start ||
pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
second_card_start = _hard_end;
}
if (pre_top < second_card_start) {
MemRegion first_card_suffix(pre_top, second_card_start);
fill_region_with_block(first_card_suffix, true);
}
pre_top = second_card_start;
_top = pre_top;
_end = MAX2(_top, _hard_end - AlignmentReserve);
}
// If the lab does not end on a card boundary, we don't want to
// allocate onto that card, since that might lead to concurrent
// allocation and card scanning, which we don't support. So we fill
// the last card with a garbage object.
size_t last_card_index = _bsa->index_for(_hard_end);
HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
if (last_card_start < _hard_end) {
// Ensure enough room to fill with the smallest block
last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
// If the top is already in the last card, don't go back beyond it!
// Or if the remainder is too small for a filler object, gobble it up.
if (_top > last_card_start ||
pointer_delta(last_card_start, _top) < AlignmentReserve) {
last_card_start = _top;
}
if (last_card_start < _hard_end) {
MemRegion last_card_prefix(last_card_start, _hard_end);
fill_region_with_block(last_card_prefix, false);
}
_hard_end = last_card_start;
_end = MAX2(_top, _hard_end - AlignmentReserve);
_true_end = _hard_end;
assert(_end <= _hard_end, "Invariant.");
}
// At this point:
// 1) we had a filler object from the original top to hard_end.
// 2) We've filled in any partial cards at the front and back.
if (pre_top < _hard_end) {
// Now we can reset the _bt to do allocation in the given area.
MemRegion new_filler(pre_top, _hard_end);
fill_region_with_block(new_filler, false);
_top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
// If there's no space left, don't retain.
if (_top >= _end) {
_retained = false;
invalidate();
return;
}
_retained_filler = MemRegion(pre_top, _top);
_bt.set_region(MemRegion(_top, _hard_end));
_bt.initialize_threshold();
assert(_bt.threshold() > _top, "initialize_threshold failed!");
// There may be other reasons for queries into the middle of the
// filler object. When such queries are done in parallel with
// allocation, bad things can happen if the query involves object
// iteration. So we ensure that such queries do not involve object
// iteration by putting another filler object on the boundaries of
// such queries. One such query is for the object spanning a parallel
// card chunk boundary.
// "chunk_boundary" is the address of the first chunk boundary less
// than "hard_end".
HeapWord* chunk_boundary =
(HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
assert(chunk_boundary < _hard_end, "Or else above did not work.");
assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
"Consequence of last card handling above.");
if (_top <= chunk_boundary) {
assert(_true_end == _hard_end, "Invariant.");
while (_top <= chunk_boundary) {
assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
"Consequence of last card handling above.");
_bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
_hard_end = chunk_boundary;
chunk_boundary -= ChunkSizeInWords;
}
_end = _hard_end - AlignmentReserve;
assert(_top <= _end, "Invariant.");
// Now reset the initial filler chunk so it doesn't overlap with
// the one(s) inserted above.
MemRegion new_filler(pre_top, _hard_end);
fill_region_with_block(new_filler, false);
}
} else {
_retained = false;
invalidate();
}
} else {
assert(!end_of_gc ||
(!_retained && _true_end == _hard_end), "Checking.");
}
assert(_end <= _hard_end, "Invariant.");
assert(_top < _end || _top == _hard_end, "Invariant");
}
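
// A minimal, standalone illustration of the chunk-boundary computation in
// retire() above. align_size_down is reproduced here with its usual mask
// semantics; ChunkSizeInBytes and the address are invented for the example
// (the real chunk size comes from the card-stride constants of this class).

#include <cstdint>
#include <cstdio>

static inline intptr_t align_size_down_illustration(intptr_t size,
                                                    intptr_t alignment) {
  return size & ~(alignment - 1);  // alignment must be a power of two
}

int main() {
  const intptr_t ChunkSizeInBytes = 4096;  // invented value for the example
  intptr_t hard_end = 0x103010;            // invented buffer end address
  // First chunk boundary strictly below hard_end, as computed in retire():
  intptr_t chunk_boundary =
      align_size_down_illustration(hard_end - 1, ChunkSizeInBytes);
  printf("chunk_boundary = %#lx\n", (long)chunk_boundary);  // prints 0x103000
  return 0;
}
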

View File

@ -216,44 +216,4 @@ class PLABStats VALUE_OBJ_CLASS_SPEC {
}
};
class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
BlockOffsetArrayContigSpace _bt;
BlockOffsetSharedArray* _bsa;
HeapWord* _true_end; // end of the whole ParGCAllocBuffer
static const size_t ChunkSizeInWords;
static const size_t ChunkSizeInBytes;
HeapWord* allocate_slow(size_t word_sz);
void fill_region_with_block(MemRegion mr, bool contig);
public:
ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
HeapWord* allocate(size_t word_sz) {
HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
if (res != NULL) {
_bt.alloc_block(res, word_sz);
} else {
res = allocate_slow(word_sz);
}
return res;
}
void undo_allocation(HeapWord* obj, size_t word_sz);
virtual void set_buf(HeapWord* buf_start) {
ParGCAllocBuffer::set_buf(buf_start);
_true_end = _hard_end;
_bt.set_region(MemRegion(buf_start, word_sz()));
_bt.initialize_threshold();
}
virtual void retire(bool end_of_gc, bool retain);
MemRegion range() {
return MemRegion(_top, _true_end);
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ class SpaceCounters: public CHeapObj<mtGC> {
MutableSpace* m, GenerationCounters* gc);
~SpaceCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC);
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
inline void update_capacity() {

View File

@ -54,6 +54,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
case _wb_conc_mark:
return "WhiteBox Initiated Concurrent Mark";
case _update_allocation_context_stats_inc:
case _update_allocation_context_stats_full:
return "Update Allocation Context Stats";

View File

@ -47,6 +47,7 @@ class GCCause : public AllStatic {
_heap_inspection,
_heap_dump,
_wb_young_gc,
_wb_conc_mark,
_update_allocation_context_stats_inc,
_update_allocation_context_stats_full,

View File

@ -334,7 +334,7 @@ void OopMapCacheEntry::deallocate_bit_mask() {
if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
"This bit mask should not be in the resource area");
FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass);
FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
debug_only(_bit_mask[0] = 0;)
}
}
@ -492,7 +492,7 @@ OopMapCache::~OopMapCache() {
flush();
// Deallocate array
NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass);
FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
}
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
@ -603,5 +603,5 @@ void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterO
tmp->initialize();
tmp->fill(method, bci);
entry->resource_copy(tmp);
FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtInternal);
FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp);
}

View File

@ -296,7 +296,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
// to avoid deadlock with NMT
while (cur != NULL) {
next = cur->next();
os::free(cur, mtChunk);
os::free(cur);
cur = next;
}
}
@ -384,7 +384,7 @@ void Chunk::operator delete(void* p) {
case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
default: os::free(c, mtChunk);
default: os::free(c);
}
}

View File

@ -101,7 +101,7 @@ typedef AllocFailStrategy::AllocFailEnum AllocFailType;
// NEW_RESOURCE_OBJ(type)
// NEW_C_HEAP_ARRAY(type, size)
// NEW_C_HEAP_OBJ(type, memflags)
// FREE_C_HEAP_ARRAY(type, old, memflags)
// FREE_C_HEAP_ARRAY(type, old)
// FREE_C_HEAP_OBJ(objname, type, memflags)
// char* AllocateHeap(size_t size, const char* name);
// void FreeHeap(void* p);
@ -669,8 +669,8 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
#define FREE_C_HEAP_ARRAY(type, old, memflags) \
FreeHeap((char*)(old), memflags)
#define FREE_C_HEAP_ARRAY(type, old) \
FreeHeap((char*)(old))
// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
@ -680,8 +680,8 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)
// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
FreeHeap((char*)objname, memflags);
#define FREE_C_HEAP_OBJ(objname)\
FreeHeap((char*)objname);
// for statistics
#ifndef PRODUCT
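
// A standalone mimic of the simplified macro pairing after this change.
// malloc/free stand in for HotSpot's AllocateHeap/FreeHeap, and the macro
// bodies here are illustrative, not the real allocation.hpp definitions;
// the point is only that the freeing side no longer names a MEMFLAGS
// category.

#include <cstdlib>

#define NEW_C_HEAP_ARRAY(type, size) \
  (type*) std::malloc((size) * sizeof(type))
#define FREE_C_HEAP_ARRAY(type, old) \
  std::free((char*)(old))

int main() {
  int* buf = NEW_C_HEAP_ARRAY(int, 16);
  FREE_C_HEAP_ARRAY(int, buf);  // no memflags argument after this change
  return 0;
}
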

View File

@ -79,11 +79,11 @@ inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
return p;
}
inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
inline void FreeHeap(void* p) {
#ifdef ASSERT
if (PrintMallocFree) trace_heap_free(p);
#endif
os::free(p, memflags);
os::free(p);
}
@ -136,11 +136,11 @@ template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
}
template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
FreeHeap(p, F);
FreeHeap(p);
}
template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
FreeHeap(p, F);
FreeHeap(p);
}
template <class E, MEMFLAGS F>
@ -199,7 +199,7 @@ template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
if (_addr != NULL) {
if (_use_malloc) {
FreeHeap(_addr, F);
FreeHeap(_addr);
} else {
os::release_memory(_addr, _size);
}

View File

@ -251,12 +251,6 @@ public:
// Return the address indicating the start of the region corresponding to
// "index" in "_offset_array".
HeapWord* address_for_index(size_t index) const;
// Return the address "p" incremented by the size of
// a region. This method does not align the address
// returned to the start of a region. It is a simple
// primitive.
HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
};
//////////////////////////////////////////////////////////////////////////

View File

@ -171,19 +171,19 @@ CardTableModRefBS::~CardTableModRefBS() {
_committed = NULL;
}
if (_lowest_non_clean) {
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
_lowest_non_clean = NULL;
}
if (_lowest_non_clean_chunk_size) {
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
_lowest_non_clean_chunk_size = NULL;
}
if (_lowest_non_clean_base_chunk_index) {
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
_lowest_non_clean_base_chunk_index = NULL;
}
if (_last_LNC_resizing_collection) {
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
_last_LNC_resizing_collection = NULL;
}
}
@ -462,19 +462,6 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
// equal to active_workers. When a different mechanism for shutting
// off parallelism is used, then active_workers can be used in
// place of n_par_threads.
// This is an example of a path where n_par_threads is
// set to 0 to turn off parallelism.
// [7] CardTableModRefBS::non_clean_card_iterate()
// [8] CardTableRS::younger_refs_in_space_iterate()
// [9] Generation::younger_refs_in_space_iterate()
// [10] OneContigSpaceCardGeneration::younger_refs_iterate()
// [11] CompactingPermGenGen::younger_refs_iterate()
// [12] CardTableRS::younger_refs_iterate()
// [13] SharedHeap::process_strong_roots()
// [14] G1CollectedHeap::verify()
// [15] Universe::verify()
// [16] G1CollectedHeap::do_collection_pause_at_safepoint()
//
int n_threads = SharedHeap::heap()->n_par_threads();
bool is_par = n_threads > 0;
if (is_par) {

View File

@ -466,11 +466,6 @@ public:
void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
static size_t par_chunk_heapword_alignment() {
return ParGCCardsPerStrideChunk * card_size_in_words;
}
};
class CardTableRS;
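
// Back-of-the-envelope evaluation of the removed helper above, which is
// where the ParGCAllocBufferWithBOT chunk constants earlier in this diff
// come from. The numbers are assumptions (512-byte cards, 8-byte heap
// words, ParGCCardsPerStrideChunk = 256), not values taken from this diff.

#include <cstdio>

int main() {
  const unsigned long card_size_in_bytes = 512;  // assumed
  const unsigned long heap_word_size = 8;        // assumed (64-bit)
  const unsigned long card_size_in_words =
      card_size_in_bytes / heap_word_size;       // 64
  const unsigned long cards_per_stride_chunk = 256;  // assumed default
  const unsigned long chunk_words = cards_per_stride_chunk * card_size_in_words;
  printf("stride chunk = %lu words (%lu KB)\n",
         chunk_words, chunk_words * heap_word_size / 1024);  // 16384 words, 128 KB
  return 0;
}
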

View File

@ -70,7 +70,7 @@ CardTableRS::~CardTableRS() {
_ct_bs = NULL;
}
if (_last_cur_val_in_gen) {
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen, mtInternal);
FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
}
}
@ -283,14 +283,14 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
// Convert the assertion check to a warning if we are running
// CMS+ParNew, until the related bug is fixed.
MemRegion ur = sp->used_region();
assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
assert(ur.contains(urasm) || (UseConcMarkSweepGC),
err_msg("Did you forget to call save_marks()? "
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
"[" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
// In the case of CMS+ParNew, issue a warning
if (!ur.contains(urasm)) {
assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
assert(UseConcMarkSweepGC, "Tautology: see assert above");
warning("CMS+ParNew: Did you forget to call save_marks()? "
"[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
"[" PTR_FORMAT ", " PTR_FORMAT ")",
@ -609,21 +609,3 @@ void CardTableRS::verify() {
_ct_bs->verify();
}
}
void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
if (!mr.is_empty()) {
jbyte* cur_entry = byte_for(mr.start());
jbyte* limit = byte_after(mr.last());
// The region mr may not start on a card boundary so
// the first card may reflect a write to the space
// just prior to mr.
if (!is_aligned(mr.start())) {
cur_entry++;
}
for (;cur_entry < limit; cur_entry++) {
guarantee(*cur_entry == CardTableModRefBS::clean_card,
"Unexpected dirty card found");
}
}
}

View File

@ -138,7 +138,6 @@ public:
}
void verify();
void verify_aligned_region_empty(MemRegion mr);
void clear(MemRegion mr) { _ct_bs->clear(mr); }
void clear_into_younger(Generation* old_gen);

View File

@ -908,31 +908,14 @@ void MarkSweepPolicy::initialize_alignments() {
}
void MarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
if (_generations == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
if (UseParNewGC) {
_generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
} else {
_generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
}
_generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
_generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
_generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
if (_generations[0] == NULL || _generations[1] == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
}
void MarkSweepPolicy::initialize_gc_policy_counters() {
// Initialize the policy counters - 2 collectors, 3 generations.
if (UseParNewGC) {
_gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
} else {
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
}
_gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
}
/////////////// Unit tests ///////////////

View File

@ -29,7 +29,6 @@
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"
class ContiguousSpace;
@ -340,9 +339,6 @@ protected:
virtual const char* name() const;
virtual const char* short_name() const { return "DefNew"; }
bool must_be_youngest() const { return true; }
bool must_be_oldest() const { return false; }
// PrintHeapAtGC support.
void print_on(outputStream* st) const;

View File

@ -331,7 +331,7 @@ bool FileMapInfo::init_from_file(int fd) {
n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
if (n != info_size) {
fail_continue("Unable to read the shared path info header.");
FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
FREE_C_HEAP_ARRAY(char, _paths_misc_info);
_paths_misc_info = NULL;
return false;
}
@ -714,7 +714,7 @@ bool FileMapInfo::validate_header() {
}
if (_paths_misc_info != NULL) {
FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
FREE_C_HEAP_ARRAY(char, _paths_misc_info);
_paths_misc_info = NULL;
}
return status;

View File

@ -36,7 +36,6 @@
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
@ -182,10 +181,10 @@ void GenCollectedHeap::post_initialize() {
SharedHeap::post_initialize();
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
guarantee(policy->is_generation_policy(), "Illegal policy type");
DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
assert(def_new_gen->kind() == Generation::DefNew ||
def_new_gen->kind() == Generation::ParNew,
"Wrong generation kind");
assert((get_gen(0)->kind() == Generation::DefNew) ||
(get_gen(0)->kind() == Generation::ParNew),
"Wrong youngest generation type");
DefNewGeneration* def_new_gen = (DefNewGeneration*)get_gen(0);
Generation* old_gen = get_gen(1);
assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
@ -363,7 +362,6 @@ void GenCollectedHeap::do_collection(bool full,
bool complete = full && (max_level == (n_gens()-1));
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
@ -1118,10 +1116,8 @@ void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
if (UseParNewGC) {
workers()->print_worker_threads_on(st);
}
if (UseConcMarkSweepGC) {
workers()->print_worker_threads_on(st);
ConcurrentMarkSweepThread::print_all_on(st);
}
#endif // INCLUDE_ALL_GCS

View File

@ -262,12 +262,12 @@ public:
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
// objects therein. This applies to DefNew+Tenured and ParNew+CMS
// only and may need to be re-examined in case other
// kinds of collectors are implemented in the future.
virtual bool can_elide_initializing_store_barrier(oop new_obj) {
// We wanted to assert that:-
// assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
// assert(UseSerialGC || UseConcMarkSweepGC,
// "Check can_elide_initializing_store_barrier() for this collector");
// but unfortunately the flag UseSerialGC need not necessarily always
// be set when DefNew+Tenured are being used.

View File

@ -37,7 +37,6 @@
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"

View File

@ -105,17 +105,6 @@ public:
virtual void verify() = 0;
// Verify that the remembered set has no entries for
// the heap interval denoted by mr. If there are any
// alignment constraints on the remembered set, only the
// part of the region that is aligned is checked.
//
// alignment boundaries
// +--------+-------+--------+-------+
// [ region mr )
// [ part checked )
virtual void verify_aligned_region_empty(MemRegion mr) = 0;
// If appropriate, print some information about the remset on "tty".
virtual void print() {}

View File

@ -36,7 +36,6 @@
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
@ -152,13 +151,6 @@ bool Generation::is_in(const void* p) const {
return blk.sp != NULL;
}
DefNewGeneration* Generation::as_DefNewGeneration() {
assert((kind() == Generation::DefNew) ||
(kind() == Generation::ParNew),
"Wrong youngest generation type");
return (DefNewGeneration*) this;
}
Generation* Generation::next_gen() const {
GenCollectedHeap* gch = GenCollectedHeap::heap();
int next = level() + 1;
@ -220,12 +212,6 @@ oop Generation::par_promote(int thread_num,
return NULL;
}
void Generation::par_promote_alloc_undo(int thread_num,
HeapWord* obj, size_t word_sz) {
// Could do a bad general impl here that gets a lock. But no.
guarantee(false, "No good general implementation.");
}
Space* Generation::space_containing(const void* p) const {
GenerationIsInReservedClosure blk(p);
// Cast away const
@ -616,252 +602,3 @@ void CardGeneration::compute_new_size() {
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
void OneContigSpaceCardGeneration::collect(bool full,
bool clear_all_soft_refs,
size_t size,
bool is_tlab) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
SpecializationStats::clear();
// Temporarily expand the span of our ref processor, so
// refs discovery is over the entire heap, not just this generation
ReferenceProcessorSpanMutator
x(ref_processor(), gch->reserved_region());
STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
gc_timer->register_gc_start();
SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
gc_timer->register_gc_end();
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
SpecializationStats::print();
}
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
bool is_tlab,
bool parallel) {
assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
if (parallel) {
MutexLocker x(ParGCRareEvent_lock);
HeapWord* result = NULL;
size_t byte_size = word_size * HeapWordSize;
while (true) {
expand(byte_size, _min_heap_delta_bytes);
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
}
result = _the_space->par_allocate(word_size);
if (result != NULL) {
return result;
} else {
// If there's not enough expansion space available, give up.
if (_virtual_space.uncommitted_size() < byte_size) {
return NULL;
}
// else try again
}
}
} else {
expand(word_size*HeapWordSize, _min_heap_delta_bytes);
return _the_space->allocate(word_size);
}
}
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
GCMutexLocker x(ExpandHeap_lock);
return CardGeneration::expand(bytes, expand_bytes);
}
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
assert_locked_or_safepoint(ExpandHeap_lock);
size_t size = ReservedSpace::page_align_size_down(bytes);
if (size > 0) {
shrink_by(size);
}
}
size_t OneContigSpaceCardGeneration::capacity() const {
return _the_space->capacity();
}
size_t OneContigSpaceCardGeneration::used() const {
return _the_space->used();
}
size_t OneContigSpaceCardGeneration::free() const {
return _the_space->free();
}
MemRegion OneContigSpaceCardGeneration::used_region() const {
return the_space()->used_region();
}
size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
return _the_space->free();
}
size_t OneContigSpaceCardGeneration::contiguous_available() const {
return _the_space->free() + _virtual_space.uncommitted_size();
}
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
assert_locked_or_safepoint(ExpandHeap_lock);
bool result = _virtual_space.expand_by(bytes);
if (result) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(_the_space->bottom(), new_word_size);
// Expand card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
// Fix for bug #4668531
if (ZapUnusedHeapArea) {
MemRegion mangle_region(_the_space->end(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
// Expand space -- also expands space's BOT
// (which uses (part of) shared array above)
_the_space->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
update_counters();
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
return result;
}
bool OneContigSpaceCardGeneration::grow_to_reserved() {
assert_locked_or_safepoint(ExpandHeap_lock);
bool success = true;
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
}
return success;
}
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
assert_locked_or_safepoint(ExpandHeap_lock);
// Shrink committed space
_virtual_space.shrink_by(bytes);
// Shrink space; this also shrinks the space's BOT
_the_space->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(_the_space->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(_the_space->bottom(), new_word_size);
// Shrink the card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
}
// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}
// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
_the_space->object_iterate(blk);
}
void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
bool usedOnly) {
blk->do_space(_the_space);
}
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
blk->set_generation(this);
younger_refs_in_space_iterate(_the_space, blk);
blk->reset_generation();
}
void OneContigSpaceCardGeneration::save_marks() {
_the_space->set_saved_mark();
}
void OneContigSpaceCardGeneration::reset_saved_marks() {
_the_space->reset_saved_mark();
}
bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
return _the_space->saved_mark_at_top();
}
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
\
void OneContigSpaceCardGeneration:: \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
blk->set_generation(this); \
_the_space->oop_since_save_marks_iterate##nv_suffix(blk); \
blk->reset_generation(); \
save_marks(); \
}
ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
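
// A standalone mimic of the token-pasting pattern used by the macro above.
// FakeSpace, FakeClosure, and the _nv suffix are invented stand-ins; the
// real macro is instantiated once per closure type listed in
// ALL_SINCE_SAVE_MARKS_CLOSURES.

#include <cstdio>

struct FakeClosure { void apply() { std::puts("visited"); } };
struct FakeSpace {
  void oop_since_save_marks_iterate_nv(FakeClosure* blk) { blk->apply(); }
};

#define SINCE_SAVE_MARKS_DEFN(ClosureType, nv_suffix)        \
  void iterate##nv_suffix(FakeSpace* sp, ClosureType* blk) { \
    sp->oop_since_save_marks_iterate##nv_suffix(blk);        \
  }

SINCE_SAVE_MARKS_DEFN(FakeClosure, _nv)

int main() {
  FakeSpace sp;
  FakeClosure cl;
  iterate_nv(&sp, &cl);  // name produced by the ## paste above
  return 0;
}
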
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
_last_gc = WaterMark(the_space(), the_space()->top());
// update the generation and space performance counters
update_counters();
if (ZapUnusedHeapArea) {
the_space()->check_mangled_unused_area_complete();
}
}
void OneContigSpaceCardGeneration::record_spaces_top() {
assert(ZapUnusedHeapArea, "Not mangling unused space");
the_space()->set_top_for_allocations();
}
void OneContigSpaceCardGeneration::verify() {
the_space()->verify();
}
void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
Generation::print_on(st);
st->print(" the");
the_space()->print_on(st);
}

Some files were not shown because too many files have changed in this diff