Merge
commit cacadff395
@@ -335,3 +335,4 @@ fd4f4f7561074dc0dbc1772c8489c7b902b6b8a9 jdk9-b87
 cf1dc4c035fb84693d4ae5ad818785cb4d1465d1 jdk9-b90
 122142a185381ce5cea959bf13b923d8cc333628 jdk9-b91
 106c06398f7ab330eef9e335fbd3a5a8ead23b77 jdk9-b92
+331fda57dfd323c61804ba0472776790de572937 jdk9-b93

@@ -335,3 +335,4 @@ c847a53b38d2fffb87afc483c74db05eced9b4f4 jdk9-b89
 29cc8228d62319af21cad7c90817671e0813b6bd jdk9-b90
 75843e0a9371d445a3c9b440bab85e50b5dc287c jdk9-b91
 f7d70caad89ad0c43bb057bca0aad6f17ce05a6a jdk9-b92
+27e9c8d8091e2447ea7ef3e3103e9b7dd286e03a jdk9-b93

@@ -495,3 +495,4 @@ bc48b669bc6610fac97e16593050c0f559cf6945 jdk9-b88
 7fe46dc64bb3a8df554b24cde0153ffb24f39c5e jdk9-b90
 3fd5c2ca4c20c183628b6dbeb8df821a961419e3 jdk9-b91
 53cb98d68a1aeb08d29c89d6da748de60c448e37 jdk9-b92
+d8b24776484cc4dfd19f50b23eaa18a80a161371 jdk9-b93
@@ -38,6 +38,7 @@ int pathmap_open(const char* name) {
   int fd;
   char alt_path[PATH_MAX + 1], *alt_path_end;
   const char *s;
+  int free_space;

   if (!alt_root_initialized) {
     alt_root_initialized = -1;
@@ -48,14 +49,22 @@ int pathmap_open(const char* name) {
     return open(name, O_RDONLY);
   }

-  strcpy(alt_path, alt_root);
-  alt_path_end = alt_path + strlen(alt_path);
-
-  // Strip path items one by one and try to open file with alt_root prepended
+  if (strlen(alt_root) + strlen(name) > PATH_MAX) {
+    // Buffer too small.
+    return -1;
+  }
+
+  strncpy(alt_path, alt_root, PATH_MAX);
+  alt_path[PATH_MAX] = '\0';
+  alt_path_end = alt_path + strlen(alt_path);
+  free_space = PATH_MAX + 1 - (alt_path_end - alt_path);
+
+  // Strip path items one by one and try to open file with alt_root prepended.
   s = name;
   while (1) {
-    strcat(alt_path, s);
-    s += 1;
+    strncat(alt_path, s, free_space);
+    s += 1;  // Skip /.

     fd = open(alt_path, O_RDONLY);
     if (fd >= 0) {
@@ -70,7 +79,8 @@ int pathmap_open(const char* name) {
       break;
     }

-    *alt_path_end = 0;
+    // Cut off what we appended above.
+    *alt_path_end = '\0';
   }

   return -1;
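The hunk above replaces unbounded strcpy/strcat with a length check plus bounded copies. A minimal standalone sketch of that pattern (probe_with_root is a hypothetical name, not from the patch):

#include <limits.h>
#include <stdio.h>
#include <string.h>

static int probe_with_root(const char* root, const char* name) {
  char buf[PATH_MAX + 1];

  // Fail early instead of overflowing the buffer, as the patched code does.
  if (strlen(root) + strlen(name) > PATH_MAX) {
    return -1;  // Buffer too small.
  }

  strncpy(buf, root, PATH_MAX);
  buf[PATH_MAX] = '\0';              // strncpy does not terminate on truncation.
  size_t free_space = PATH_MAX - strlen(buf);
  strncat(buf, name, free_space);    // strncat always appends a terminator.

  printf("would try: %s\n", buf);
  return 0;
}

int main() {
  return probe_with_root("/altroot", "/usr/lib/libfoo.so");
}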
@@ -774,72 +774,78 @@ err:

 // process segments from interpreter (ld.so or ld-linux.so)
 static bool read_interp_segments(struct ps_prochandle* ph) {
-   ELF_EHDR interp_ehdr;
+  ELF_EHDR interp_ehdr;

-   if (read_elf_header(ph->core->interp_fd, &interp_ehdr) != true) {
-      print_debug("interpreter is not a valid ELF file\n");
-      return false;
-   }
+  if (read_elf_header(ph->core->interp_fd, &interp_ehdr) != true) {
+    print_debug("interpreter is not a valid ELF file\n");
+    return false;
+  }

-   if (read_lib_segments(ph, ph->core->interp_fd, &interp_ehdr, ph->core->ld_base_addr) != true) {
-      print_debug("can't read segments of interpreter\n");
-      return false;
-   }
+  if (read_lib_segments(ph, ph->core->interp_fd, &interp_ehdr, ph->core->ld_base_addr) != true) {
+    print_debug("can't read segments of interpreter\n");
+    return false;
+  }

-   return true;
+  return true;
 }

 // process segments of a a.out
 static bool read_exec_segments(struct ps_prochandle* ph, ELF_EHDR* exec_ehdr) {
-   int i = 0;
-   ELF_PHDR* phbuf = NULL;
-   ELF_PHDR* exec_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf = NULL;
+  ELF_PHDR* exec_php = NULL;

-   if ((phbuf = read_program_header_table(ph->core->exec_fd, exec_ehdr)) == NULL)
-      return false;
+  if ((phbuf = read_program_header_table(ph->core->exec_fd, exec_ehdr)) == NULL) {
+    return false;
+  }

-   for (exec_php = phbuf, i = 0; i < exec_ehdr->e_phnum; i++) {
-      switch (exec_php->p_type) {
+  for (exec_php = phbuf, i = 0; i < exec_ehdr->e_phnum; i++) {
+    switch (exec_php->p_type) {

-         // add mappings for PT_LOAD segments
-         case PT_LOAD: {
-            // add only non-writable segments of non-zero filesz
-            if (!(exec_php->p_flags & PF_W) && exec_php->p_filesz != 0) {
-               if (add_map_info(ph, ph->core->exec_fd, exec_php->p_offset, exec_php->p_vaddr, exec_php->p_filesz) == NULL) goto err;
-            }
-            break;
-         }
+      // add mappings for PT_LOAD segments
+      case PT_LOAD: {
+        // add only non-writable segments of non-zero filesz
+        if (!(exec_php->p_flags & PF_W) && exec_php->p_filesz != 0) {
+          if (add_map_info(ph, ph->core->exec_fd, exec_php->p_offset, exec_php->p_vaddr, exec_php->p_filesz) == NULL) goto err;
+        }
+        break;
+      }

-         // read the interpreter and it's segments
-         case PT_INTERP: {
-            char interp_name[BUF_SIZE];
+      // read the interpreter and it's segments
+      case PT_INTERP: {
+        char interp_name[BUF_SIZE + 1];

-            pread(ph->core->exec_fd, interp_name, MIN(exec_php->p_filesz, BUF_SIZE), exec_php->p_offset);
-            print_debug("ELF interpreter %s\n", interp_name);
-            // read interpreter segments as well
-            if ((ph->core->interp_fd = pathmap_open(interp_name)) < 0) {
-               print_debug("can't open runtime loader\n");
-               goto err;
-            }
-            break;
-         }
+        // BUF_SIZE is PATH_MAX + NAME_MAX + 1.
+        if (exec_php->p_filesz > BUF_SIZE) {
+          goto err;
+        }
+        pread(ph->core->exec_fd, interp_name, exec_php->p_filesz, exec_php->p_offset);
+        interp_name[exec_php->p_filesz] = '\0';
+        print_debug("ELF interpreter %s\n", interp_name);
+        // read interpreter segments as well
+        if ((ph->core->interp_fd = pathmap_open(interp_name)) < 0) {
+          print_debug("can't open runtime loader\n");
+          goto err;
+        }
+        break;
+      }

-         // from PT_DYNAMIC we want to read address of first link_map addr
-         case PT_DYNAMIC: {
-            ph->core->dynamic_addr = exec_php->p_vaddr;
-            print_debug("address of _DYNAMIC is 0x%lx\n", ph->core->dynamic_addr);
-            break;
-         }
+      // from PT_DYNAMIC we want to read address of first link_map addr
+      case PT_DYNAMIC: {
+        ph->core->dynamic_addr = exec_php->p_vaddr;
+        print_debug("address of _DYNAMIC is 0x%lx\n", ph->core->dynamic_addr);
+        break;
+      }

-      } // switch
-      exec_php++;
-   } // for
+    } // switch
+    exec_php++;
+  } // for

-   free(phbuf);
-   return true;
-err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return true;
+ err:
+  free(phbuf);
+  return false;
 }
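The PT_INTERP change follows a general rule: bytes read from a file are not guaranteed to be NUL-terminated, so bound the read and terminate explicitly. A hedged sketch of just that step (read_interp_name and the BUF_SIZE value are illustrative stand-ins):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BUF_SIZE 4096  // stand-in; the real code uses PATH_MAX + NAME_MAX + 1

static int read_interp_name(int fd, off_t offset, size_t filesz) {
  char name[BUF_SIZE + 1];      // +1 leaves room for the terminator
  if (filesz > BUF_SIZE) {
    return -1;                  // segment larger than the buffer: refuse
  }
  if (pread(fd, name, filesz, offset) != (ssize_t)filesz) {
    return -1;
  }
  name[filesz] = '\0';          // pread copies raw bytes; terminate explicitly
  printf("read: %s\n", name);
  return 0;
}

int main(int argc, char** argv) {
  int fd = open(argv[0], O_RDONLY);
  if (fd < 0) return 1;
  int rc = read_interp_name(fd, 0, 16);  // demo: first 16 bytes of this binary
  close(fd);
  return rc;
}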
@@ -39,7 +39,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -34,7 +34,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,8 +82,7 @@ all: $(TraceGeneratedFiles)

 GENERATE_CODE= \
   $(QUIETLY) echo $(LOG_INFO) Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@

 $(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
@@ -34,7 +34,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -34,7 +34,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -83,8 +83,7 @@ all: $(TraceGeneratedFiles)

 GENERATE_CODE= \
   $(QUIETLY) echo $(LOG_INFO) Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@

 $(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
@@ -1,6 +1,6 @@
 #!/bin/sh

-# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -156,7 +156,7 @@ else
   export LD_LIBRARY_PATH
 fi

-JPARMS="-XXaltjvm=$MYDIR -Dsun.java.launcher.is_altjvm=true $@ $JAVA_ARGS";
+JPARMS="-XXaltjvm=$MYDIR -Dsun.java.launcher.is_altjvm=true";

 # Locate the java launcher
 LAUNCHER=$JDK/bin/java
@@ -181,8 +181,6 @@ init_gdb() {
 cd `pwd`
 handle SIGUSR1 nostop noprint
 handle SIGUSR2 nostop noprint
-set args $JPARMS
-file $LAUNCHER
 directory $GDBSRCDIR
 # Get us to a point where we can set breakpoints in libjvm.so
 set breakpoint pending on
@@ -194,11 +192,10 @@ delete 1
 EOF
 }

-
 case "$MODE" in
     gdb)
         init_gdb
-        $GDB -x $GDBSCR
+        $GDB -x $GDBSCR --args $LAUNCHER $JPARMS "$@" $JAVA_ARGS
         rm -f $GDBSCR
         ;;
     gud)
@@ -219,15 +216,15 @@ case "$MODE" in
         rm -f $GDBSCR
         ;;
     dbx)
-        $DBX -s $HOME/.dbxrc -c "loadobject -load libjvm.so; stop in JNI_CreateJavaVM; run $JPARMS; delete all" $LAUNCHER
+        $DBX -s $HOME/.dbxrc -c "loadobject -load libjvm.so; stop in JNI_CreateJavaVM; run $JPARMS $@ $JAVA_ARGS; delete all" $LAUNCHER
         ;;
     valgrind)
         echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap
         echo
-        $VALGRIND --tool=memcheck --leak-check=yes --num-callers=50 $LAUNCHER -Xmx16m $JPARMS
+        $VALGRIND --tool=memcheck --leak-check=yes --num-callers=50 $LAUNCHER -Xmx16m $JPARMS "$@" $JAVA_ARGS
        ;;
     run)
-        LD_PRELOAD=$PRELOADING exec $LAUNCHER $JPARMS
+        LD_PRELOAD=$PRELOADING exec $LAUNCHER $JPARMS "$@" $JAVA_ARGS
        ;;
     *)
        echo Error: Internal error, unknown launch mode \"$MODE\"
@@ -34,7 +34,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -34,7 +34,6 @@ SUNWprivate_1.1 {
                 jio_snprintf;
                 jio_vfprintf;
                 jio_vsnprintf;
-                fork1;
                 numa_warn;
                 numa_error;

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,8 +82,7 @@ all: $(TraceGeneratedFiles)

 GENERATE_CODE= \
   $(QUIETLY) echo $(LOG_INFO) Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@

 $(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
@@ -550,19 +550,6 @@ else
   #LINK_INTO = LIBJVM
 endif

-# Solaris platforms collect lots of redundant file-ident lines,
-# to the point of wasting a significant percentage of file space.
-# (The text is stored in ELF .comment sections, contributed by
-# all "#pragma ident" directives in header and source files.)
-# This command "compresses" the .comment sections simply by
-# removing repeated lines. The data can be extracted from
-# binaries in the field by using "mcs -p libjvm.so" or the older
-# command "what libjvm.so".
-LINK_LIB.CXX/POST_HOOK += $(MCS) -c $@ || exit 1;
-# (The exit 1 is necessary to cause a build failure if the command fails and
-# multiple commands are strung together, and the final semicolon is necessary
-# since the hook must terminate itself as a valid command.)
-
 # Also, strip debug and line number information (worth about 1.7Mb).
 # If we can create .debuginfo files, then the VM is stripped in vm.make
 # and this macro is not used.
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -78,8 +78,7 @@ all: $(TraceGeneratedFiles)

 GENERATE_CODE= \
   $(QUIETLY) echo $(LOG_INFO) Generating $@; \
-  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
-  test -f $@
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@

 $(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
 	$(GENERATE_CODE)
@@ -45,6 +45,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \
+    $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
     #

 BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
 #
-#
+#

 # Generic compiler settings
@@ -54,7 +54,11 @@ CXX=cl.exe
 # improving the quality of crash log stack traces involving jvm.dll.

 # These are always used in all compiles
-CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX
+CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3
+
+!if "$(WARNINGS_AS_ERRORS)" != "false"
+CXX_FLAGS=$(CXX_FLAGS) /WX
+!endif

 # Let's add debug information when Full Debug Symbols is enabled
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
@@ -167,7 +171,7 @@ LD_FLAGS= $(LD_FLAGS) /map /debug
 !endif


-!if $(MSC_VER) >= 1600
+!if $(MSC_VER) >= 1600
 LD_FLAGS= $(LD_FLAGS) psapi.lib
 !endif

@@ -191,4 +195,3 @@ RC_FLAGS=/D "HS_VER=$(HS_VER)" \
 !if "$(MFC_DEBUG)" == "true"
 RC_FLAGS = $(RC_FLAGS) /D "_DEBUG"
 !endif
-
@@ -31,6 +31,8 @@
 SLASH_JAVA ?= J:
 PATH_SEP = ;

+MAKE_ARGS += WARNINGS_AS_ERRORS=$(WARNINGS_AS_ERRORS)
+
 # Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
 ifeq ($(ARCH_DATA_MODEL),32)
   ARCH_DATA_MODEL=32
@@ -5572,7 +5572,6 @@ instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{

 instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
-  // SAPJVM GL 2014-05-21 Differs.
   predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0 &&
             _kids[0]->_leaf->as_Load()->is_unordered());
   ins_cost(MEMORY_REF_COST);
@@ -10949,7 +10948,7 @@ instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iR
     // TODO: PPC port $archOpcode(ppc64Opcode_compound);
     __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
                                  $tmp3$$Register, $tmp1$$Register, $tmp2$$Register,
-                                 UseBiasedLocking && !UseOptoBiasInlining); // SAPJVM MD 2014-11-06 UseOptoBiasInlining
+                                 UseBiasedLocking && !UseOptoBiasInlining);
     // If locking was successfull, crx should indicate 'EQ'.
     // The compiler generates a branch to the runtime call to
     // _complete_monitor_locking_Java for the case where crx is 'NE'.
@@ -61,7 +61,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
-    assert((address) (nativeMovConstReg_at(addr())->data()) == x, "data must match");
+    guarantee((address) (nativeMovConstReg_at(addr())->data()) == x, "data must match");
   }
 }
@@ -60,7 +60,7 @@ void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
   masm.patchable_sethi(x, destreg);
   int len = buffer - masm.pc();
   for (int i = 0; i < len; i++) {
-    assert(instaddr[i] == buffer[i], "instructions must match");
+    guarantee(instaddr[i] == buffer[i], "instructions must match");
   }
 }
@@ -84,7 +84,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     inst &= ~Assembler::simm( -1, 13);
     inst |=  Assembler::simm(simm13, 13);
     if (verify_only) {
-      assert(ip->long_at(0) == inst, "instructions must match");
+      guarantee(ip->long_at(0) == inst, "instructions must match");
     } else {
       ip->set_long_at(0, inst);
     }
@@ -102,15 +102,15 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     inst &= ~Assembler::hi22(-1);
     inst |=  Assembler::hi22((intptr_t)np);
     if (verify_only) {
-      assert(ip->long_at(0) == inst, "instructions must match");
+      guarantee(ip->long_at(0) == inst, "instructions must match");
     } else {
       ip->set_long_at(0, inst);
     }
     inst2 = ip->long_at( NativeInstruction::nop_instruction_size );
     guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op");
     if (verify_only) {
-      assert(ip->long_at(NativeInstruction::nop_instruction_size) == NativeInstruction::set_data32_simm13( inst2, (intptr_t)np),
-             "instructions must match");
+      guarantee(ip->long_at(NativeInstruction::nop_instruction_size) == NativeInstruction::set_data32_simm13( inst2, (intptr_t)np),
+                "instructions must match");
     } else {
       ip->set_long_at(NativeInstruction::nop_instruction_size, NativeInstruction::set_data32_simm13( inst2, (intptr_t)np));
     }
@@ -127,7 +127,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     inst |= Assembler::hi22((intptr_t)x);
     // (ignore offset; it doesn't play into the sethi)
     if (verify_only) {
-      assert(ip->long_at(0) == inst, "instructions must match");
+      guarantee(ip->long_at(0) == inst, "instructions must match");
     } else {
       ip->set_long_at(0, inst);
     }
@@ -41,7 +41,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
          which == Assembler::imm_operand, "format unpacks ok");
   if (which == Assembler::imm_operand) {
     if (verify_only) {
-      assert(*pd_address_in_code() == x, "instructions must match");
+      guarantee(*pd_address_in_code() == x, "instructions must match");
     } else {
       *pd_address_in_code() = x;
     }
@@ -50,13 +50,13 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     // both compressed oops and compressed classes look the same
     if (Universe::heap()->is_in_reserved((oop)x)) {
       if (verify_only) {
-        assert(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+        guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
       } else {
         *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
       }
     } else {
       if (verify_only) {
-        assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
+        guarantee(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
       } else {
         *(int32_t*) disp = Klass::encode_klass((Klass*)x);
       }
@@ -67,14 +67,14 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     address disp = Assembler::locate_operand(ip, which);
     address next_ip = Assembler::locate_next_instruction(ip);
     if (verify_only) {
-      assert(*(int32_t*) disp == (x - next_ip), "instructions must match");
+      guarantee(*(int32_t*) disp == (x - next_ip), "instructions must match");
     } else {
       *(int32_t*) disp = x - next_ip;
     }
   }
 #else
   if (verify_only) {
-    assert(*pd_address_in_code() == (x + o), "instructions must match");
+    guarantee(*pd_address_in_code() == (x + o), "instructions must match");
   } else {
     *pd_address_in_code() = x + o;
   }
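The assert-to-guarantee changes above matter because, in HotSpot, assert() is compiled out of product builds while guarantee() is checked in every build. A minimal sketch of the same idea in portable C++ (the macro names mirror HotSpot's but this is not the HotSpot implementation):

#include <cstdio>
#include <cstdlib>

#define my_guarantee(cond, msg)                              \
  do {                                                       \
    if (!(cond)) {                                           \
      std::fprintf(stderr, "guarantee failed: %s\n", (msg)); \
      std::abort();                                          \
    }                                                        \
  } while (0)

#ifdef PRODUCT
#define my_assert(cond, msg) ((void)0)            // disappears in product builds
#else
#define my_assert(cond, msg) my_guarantee(cond, msg)
#endif

int main() {
  my_assert(1 + 1 == 2, "checked only in debug builds");
  my_guarantee(1 + 1 == 2, "checked in every build");
  return 0;
}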
@@ -147,7 +147,7 @@ uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
   b_pow_x_table[0] = b;
   for (int k = 0; k < D; ++k) {
     // If "a" has non-zero coefficient at x**k, add ((b * x**k) mod P) to the result.
-    if ((a & (uint64_t)(1 << (D - 1 - k))) != 0) product ^= b_pow_x_table[k];
+    if ((a & (((uint32_t)1) << (D - 1 - k))) != 0) product ^= b_pow_x_table[k];

     // Compute b_pow_x_table[k+1] = (b ** x**(k+1)) mod P.
     if (b_pow_x_table[k] & 1) {
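The crc32c fix replaces (uint64_t)(1 << ...) with ((uint32_t)1) << ... because the cast must happen before the shift: the literal 1 is a 32-bit int, so 1 << 31 already overflows, and widening the result afterwards does not repair it. A small demonstration:

#include <cstdint>
#include <cstdio>

int main() {
  int k = 31;
  // Wrong: shifts the int literal first, then widens the bogus result.
  //   uint64_t bad = (uint64_t)(1 << k);   // undefined behavior at k == 31
  // Right: make the operand wide enough (or unsigned) before shifting.
  uint32_t good = ((uint32_t)1) << k;
  std::printf("bit %d mask = 0x%08x\n", k, good);
  return 0;
}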
@@ -1611,7 +1611,7 @@ static jlong double_signflip_pool[2*2];
 void TemplateTable::fneg() {
   transition(ftos, ftos);
   if (UseSSE >= 1) {
-    static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
+    static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
     __ xorps(xmm0, ExternalAddress((address) float_signflip));
   } else {
     LP64_ONLY(ShouldNotReachHere());
@@ -1622,7 +1622,8 @@ void TemplateTable::fneg() {
 void TemplateTable::dneg() {
   transition(dtos, dtos);
   if (UseSSE >= 2) {
-    static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
+    static jlong *double_signflip =
+      double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
     __ xorpd(xmm0, ExternalAddress((address) double_signflip));
   } else {
 #ifdef _LP64
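Wrapping the sign-bit constants in CONST64 forces a 64-bit literal on every compiler; a bare 0x8000000000000000 may be warned about or mis-typed on 32-bit targets. A sketch of the idea (HotSpot defines CONST64 in its global definitions; this local macro is illustrative only):

#include <cstdio>

typedef long long jlong_t;     // stand-in for HotSpot's jlong
#define MY_CONST64(x) (x##LL)  // force a 64-bit literal, as CONST64 does

int main() {
  jlong_t signflip = (jlong_t)MY_CONST64(0x8000000000000000);
  std::printf("%016llx\n", (unsigned long long)signflip);
  return 0;
}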
@@ -652,7 +652,7 @@ public:
       result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                cores_per_cpu();
     }
-    return result;
+    return (result == 0 ? 1 : result);
   }

   static intx L1_line_size() {
@@ -37,5 +37,9 @@ void VM_Version::initialize() {
     warning("Unaligned memory access is not available on this CPU");
     FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
   }
+  // Disable prefetching for Zero
+  if (! FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+    warning("Prefetching is not available for a Zero VM");
+  }
+  FLAG_SET_DEFAULT(AllocatePrefetchDistance, 0);
 }
@@ -1112,7 +1112,7 @@ public class HotSpotVMConfig {
     @HotSpotVMField(name = "JavaThread::_osthread", type = "OSThread*", get = HotSpotVMField.Type.OFFSET) @Stable public int osThreadOffset;
     @HotSpotVMField(name = "JavaThread::_dirty_card_queue", type = "DirtyCardQueue", get = HotSpotVMField.Type.OFFSET) @Stable public int javaThreadDirtyCardQueueOffset;
     @HotSpotVMField(name = "JavaThread::_is_method_handle_return", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int threadIsMethodHandleReturnOffset;
-    @HotSpotVMField(name = "JavaThread::_satb_mark_queue", type = "ObjPtrQueue", get = HotSpotVMField.Type.OFFSET) @Stable public int javaThreadSatbMarkQueueOffset;
+    @HotSpotVMField(name = "JavaThread::_satb_mark_queue", type = "SATBMarkQueue", get = HotSpotVMField.Type.OFFSET) @Stable public int javaThreadSatbMarkQueueOffset;
     @HotSpotVMField(name = "JavaThread::_vm_result", type = "oop", get = HotSpotVMField.Type.OFFSET) @Stable public int threadObjectResultOffset;
     @HotSpotVMField(name = "JavaThread::_jvmci_counters", type = "jlong*", get = HotSpotVMField.Type.OFFSET) @Stable public int jvmciCountersThreadOffset;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -48,7 +48,6 @@ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
     : handler;
   switch (sig) {
     /* The following are already used by the VM. */
-    case INTERRUPT_SIGNAL:
     case SIGFPE:
     case SIGILL:
     case SIGSEGV:
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -113,7 +113,6 @@
 /* Signal definitions */

 #define BREAK_SIGNAL     SIGQUIT /* Thread dumping support.    */
-#define INTERRUPT_SIGNAL SIGUSR1 /* Interruptible I/O support. */
 #define SHUTDOWN1_SIGNAL SIGHUP  /* Shutdown Hooks support.    */
 #define SHUTDOWN2_SIGNAL SIGINT
 #define SHUTDOWN3_SIGNAL SIGTERM
@@ -33,153 +33,337 @@
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
 #endif
-// 'allocation.inline.hpp' triggers the inclusion of 'inttypes.h' which defines macros
-// required by the definitions in 'globalDefinitions.hpp'. But these macros in 'inttypes.h'
-// are only defined if '__STDC_FORMAT_MACROS' is defined!
-#include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/threadCritical.hpp"

-#include "loadlib_aix.hpp"
-#include "porting_aix.hpp"
+#include "loadlib_aix.hpp"
+// for CritSect
+#include "misc_aix.hpp"
+#include "porting_aix.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/ostream.hpp"

 // For loadquery()
 #include <sys/ldr.h>

-///////////////////////////////////////////////////////////////////////////////
-// Implementation for LoadedLibraryModule
+// Use raw malloc instead of os::malloc - this code gets used for error reporting.

-// output debug info
-void LoadedLibraryModule::print(outputStream* os) const {
-  os->print("%15.15s: text: " INTPTR_FORMAT " - " INTPTR_FORMAT
-            ", data: " INTPTR_FORMAT " - " INTPTR_FORMAT " ",
-            shortname, text_from, text_to, data_from, data_to);
-  os->print(" %s", fullpath);
-  if (strlen(membername) > 0) {
-    os->print("(%s)", membername);
+// A class to "intern" eternal strings.
+// TODO: similar coding exists in AIX version of dladdr and potentially elsewhere: consolidate!
+class StringList {
+
+  char** _list;
+  int _cap;
+  int _num;
+
+  // Enlarge list. If oom, leave old list intact and return false.
+  bool enlarge() {
+    int cap2 = _cap + 64;
+    char** l2 = (char**) ::realloc(_list, sizeof(char*) * cap2);
+    if (!l2) {
+      return false;
+    }
+    _list = l2;
+    _cap = cap2;
+    return true;
+  }
+
+  // Append string to end of list.
+  // Returns NULL if oom.
+  char* append(const char* s) {
+    if (_cap == _num) {
+      if (!enlarge()) {
+        return NULL;
+      }
+    }
+    assert0(_cap > _num);
+    char* s2 = ::strdup(s);
+    if (!s2) {
+      return NULL;
+    }
+    _list[_num] = s2;
+    trcVerbose("StringDir: added %s at pos %d", s2, _num);
+    _num ++;
+    return s2;
+  }
+
+ public:
+
+  StringList()
+    : _list(NULL)
+    , _cap(0)
+    , _num(0)
+  {}
+
+  // String is copied into the list; pointer to copy is returned.
+  // Returns NULL if oom.
+  char* add (const char* s) {
+    for (int i = 0; i < _num; i++) {
+      if (strcmp(_list[i], s) == 0) {
+        return _list[i];
+      }
+    }
+    return append(s);
+  }
+
+};
+
+static StringList g_stringlist;
+
+//////////////////////
+
+// Entries are kept in a linked list ordered by text address. Entries are not
+// eternal - this list is rebuilt on every reload.
+// Note that we do not hand out those entries, but copies of them.
+
+struct entry_t {
+  entry_t* next;
+  loaded_module_t info;
+};
+
+static void print_entry(const entry_t* e, outputStream* os) {
+  const loaded_module_t* const lm = &(e->info);
+  os->print(" %c text: " INTPTR_FORMAT " - " INTPTR_FORMAT
+            ", data: " INTPTR_FORMAT " - " INTPTR_FORMAT " "
+            "%s",
+            (lm->is_in_vm ? '*' : ' '),
+            lm->text, (uintptr_t)lm->text + lm->text_len,
+            lm->data, (uintptr_t)lm->data + lm->data_len,
+            lm->path);
+  if (lm->member) {
+    os->print("(%s)", lm->member);
   }
   os->cr();
 }

+static entry_t* g_first = NULL;
+
 ///////////////////////////////////////////////////////////////////////////////
 // Implementation for LoadedLibraries

-// class variables
-LoadedLibraryModule LoadedLibraries::tab[MAX_MODULES];
-int LoadedLibraries::num_loaded = 0;
-
-// Checks whether the address p points to any of the loaded code segments.
-// If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
-// static
-const LoadedLibraryModule* LoadedLibraries::find_for_text_address(const unsigned char* p) {
-
-  if (num_loaded == 0) {
-    reload();
-  }
-  for (int i = 0; i < num_loaded; i++) {
-    if (tab[i].is_in_text(p)) {
-      return &tab[i];
+static entry_t* find_entry_for_text_address(const void* p) {
+  for (entry_t* e = g_first; e; e = e->next) {
+    if ((uintptr_t)p >= (uintptr_t)e->info.text &&
+        (uintptr_t)p < ((uintptr_t)e->info.text + e->info.text_len)) {
+      return e;
     }
   }
   return NULL;
 }

-// Checks whether the address p points to any of the loaded data segments.
-// If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
-// static
-const LoadedLibraryModule* LoadedLibraries::find_for_data_address(const unsigned char* p) {
-  if (num_loaded == 0) {
-    reload();
-  }
-  for (int i = 0; i < num_loaded; i++) {
-    if (tab[i].is_in_data(p)) {
-      return &tab[i];
+static entry_t* find_entry_for_data_address(const void* p) {
+  for (entry_t* e = g_first; e; e = e->next) {
+    if ((uintptr_t)p >= (uintptr_t)e->info.data &&
+        (uintptr_t)p < ((uintptr_t)e->info.data + e->info.data_len)) {
+      return e;
     }
   }
   return NULL;
 }

-// Rebuild the internal table of LoadedLibraryModule objects
-// static
-void LoadedLibraries::reload() {
-
-  ThreadCritical cs;
-
-  // discard old content
-  num_loaded = 0;
-
-  // Call loadquery(L_GETINFO..) to get a list of all loaded Dlls from AIX.
-  size_t buf_size = 4096;
-  char* loadquery_buf = AllocateHeap(buf_size, mtInternal);
-
-  while(loadquery(L_GETINFO, loadquery_buf, buf_size) == -1) {
-    if (errno == ENOMEM) {
-      buf_size *= 2;
-      loadquery_buf = ReallocateHeap(loadquery_buf, buf_size, mtInternal);
-    } else {
-      FreeHeap(loadquery_buf);
-      // Ensure that the uintptr_t pointer is valid
-      assert(errno != EFAULT, "loadquery: Invalid uintptr_t in info buffer.");
-      fprintf(stderr, "loadquery failed (%d %s)", errno, strerror(errno));
-      return;
-    }
+// Adds a new entry to the list (ordered by text address ascending).
+static void add_entry_to_list(entry_t* e, entry_t** start) {
+  entry_t* last = NULL;
+  entry_t* e2 = *start;
+  while (e2 && e2->info.text < e->info.text) {
+    last = e2;
+    e2 = e2->next;
+  }
+  if (last) {
+    last->next = e;
+  } else {
+    *start = e;
+  }
+  e->next = e2;
 }

-  // Iterate over the loadquery result. For details see sys/ldr.h on AIX.
-  const struct ld_info* p = (struct ld_info*) loadquery_buf;
+static void free_entry_list(entry_t** start) {
+  entry_t* e = *start;
+  while (e) {
+    entry_t* const e2 = e->next;
+    ::free(e);
+    e = e2;
+  }
+  *start = NULL;
+}

-  // Ensure we have all loaded libs.
-  bool all_loaded = false;
-  while(num_loaded < MAX_MODULES) {
-    LoadedLibraryModule& mod = tab[num_loaded];
-    mod.text_from = (const unsigned char*) p->ldinfo_textorg;
-    mod.text_to   = (const unsigned char*) (((char*)p->ldinfo_textorg) + p->ldinfo_textsize);
-    mod.data_from = (const unsigned char*) p->ldinfo_dataorg;
-    mod.data_to   = (const unsigned char*) (((char*)p->ldinfo_dataorg) + p->ldinfo_datasize);
-    sprintf(mod.fullpath, "%.*s", sizeof(mod.fullpath), p->ldinfo_filename);
-    // do we have a member name as well (see ldr.h)?
-    const char* p_mbr_name = p->ldinfo_filename + strlen(p->ldinfo_filename) + 1;
-    if (*p_mbr_name) {
-      sprintf(mod.membername, "%.*s", sizeof(mod.membername), p_mbr_name);
-    } else {
-      mod.membername[0] = '\0';
-    }
-
-    // fill in the short name
-    const char* p_slash = strrchr(mod.fullpath, '/');
-    if (p_slash) {
-      sprintf(mod.shortname, "%.*s", sizeof(mod.shortname), p_slash + 1);
-    } else {
-      sprintf(mod.shortname, "%.*s", sizeof(mod.shortname), mod.fullpath);
-    }
-    num_loaded ++;
-
-    // next entry...
-    if (p->ldinfo_next) {
-      p = (struct ld_info*)(((char*)p) + p->ldinfo_next);
-    } else {
-      all_loaded = true;
-      break;
-    }
-  }
-
-  FreeHeap(loadquery_buf);
-
-  // Ensure we have all loaded libs
-  assert(all_loaded, "loadquery returned more entries then expected. Please increase MAX_MODULES");
-
-}
-
-// output loaded libraries table
-//static
-void LoadedLibraries::print(outputStream* os) {
-
-  for (int i = 0; i < num_loaded; i++) {
-    tab[i].print(os);
-  }
-
-}
+// Rebuild the internal module table. If an error occurs, old table remains
+// unchanged.
+static bool reload_table() {
+
+  bool rc = false;
+
+  trcVerbose("reload module table...");
+
+  entry_t* new_list = NULL;
+  const struct ld_info* ldi = NULL;
+
+  // Call loadquery(L_GETINFO..) to get a list of all loaded Dlls from AIX. loadquery
+  // requires a large enough buffer.
+  uint8_t* buffer = NULL;
+  size_t buflen = 1024;
+  for (;;) {
+    buffer = (uint8_t*) ::realloc(buffer, buflen);
+    if (loadquery(L_GETINFO, buffer, buflen) == -1) {
+      if (errno == ENOMEM) {
+        buflen *= 2;
+      } else {
+        trcVerbose("loadquery failed (%d)", errno);
+        goto cleanup;
+      }
+    } else {
+      break;
+    }
+  }
+
+  trcVerbose("loadquery buffer size is %llu.", buflen);
+
+  // Iterate over the loadquery result. For details see sys/ldr.h on AIX.
+  ldi = (struct ld_info*) buffer;
+
+  for (;;) {
+
+    entry_t* e = (entry_t*) ::malloc(sizeof(entry_t));
+    if (!e) {
+      trcVerbose("OOM.");
+      goto cleanup;
+    }
+
+    memset(e, 0, sizeof(entry_t));
+
+    e->info.text = ldi->ldinfo_textorg;
+    e->info.text_len = ldi->ldinfo_textsize;
+    e->info.data = ldi->ldinfo_dataorg;
+    e->info.data_len = ldi->ldinfo_datasize;
+
+    e->info.path = g_stringlist.add(ldi->ldinfo_filename);
+    if (!e->info.path) {
+      trcVerbose("OOM.");
+      goto cleanup;
+    }
+
+    // Extract short name
+    {
+      const char* p = strrchr(e->info.path, '/');
+      if (p) {
+        p ++;
+        e->info.shortname = p;
+      } else {
+        e->info.shortname = e->info.path;
+      }
+    }
+
+    // Do we have a member name as well (see ldr.h)?
+    const char* p_mbr_name =
+      ldi->ldinfo_filename + strlen(ldi->ldinfo_filename) + 1;
+    if (*p_mbr_name) {
+      e->info.member = g_stringlist.add(p_mbr_name);
+      if (!e->info.member) {
+        trcVerbose("OOM.");
+        goto cleanup;
+      }
+    } else {
+      e->info.member = NULL;
+    }
+
+    if (strcmp(e->info.shortname, "libjvm.so") == 0) {
+      // Note that this, theoretically, is fuzzy. We may accidentally contain
+      // more than one libjvm.so. But that is improbable, so lets go with this
+      // solution.
+      e->info.is_in_vm = true;
+    }
+
+    trcVerbose("entry: %p %llu, %p %llu, %s %s %s, %d",
+               e->info.text, e->info.text_len,
+               e->info.data, e->info.data_len,
+               e->info.path, e->info.shortname,
+               (e->info.member ? e->info.member : "NULL"),
+               e->info.is_in_vm
+               );
+
+    // Add to list.
+    add_entry_to_list(e, &new_list);
+
+    // Next entry...
+    if (ldi->ldinfo_next) {
+      ldi = (struct ld_info*)(((char*)ldi) + ldi->ldinfo_next);
+    } else {
+      break;
+    }
+  }
+
+  // We are done. All is well. Free old list and swap to new one.
+  if (g_first) {
+    free_entry_list(&g_first);
+  }
+  g_first = new_list;
+  new_list = NULL;
+
+  rc = true;
+
+cleanup:
+
+  if (new_list) {
+    free_entry_list(&new_list);
+  }
+
+  ::free(buffer);
+
+  return rc;
+
+} // end LoadedLibraries::reload()
+
+///////////////////////////////////////////////////////////////////////////////
+// Externals
+
+static MiscUtils::CritSect g_cs;
+
+// Rebuild the internal module table. If an error occurs, old table remains
+// unchanged.
+bool LoadedLibraries::reload() {
+  MiscUtils::AutoCritSect lck(&g_cs);
+  return reload_table();
+}
+
+void LoadedLibraries::print(outputStream* os) {
+  MiscUtils::AutoCritSect lck(&g_cs);
+  if (!g_first) {
+    reload_table();
+  }
+  for (entry_t* e = g_first; e; e = e->next) {
+    print_entry(e, os);
+    os->cr();
+  }
+}
+
+bool LoadedLibraries::find_for_text_address(const void* p,
+                                            loaded_module_t* info) {
+  MiscUtils::AutoCritSect lck(&g_cs);
+  if (!g_first) {
+    reload_table();
+  }
+  const entry_t* const e = find_entry_for_text_address(p);
+  if (e) {
+    if (info) {
+      *info = e->info;
+    }
+    return true;
+  }
+  return false;
+}
+
+bool LoadedLibraries::find_for_data_address (
+  const void* p,
+  loaded_module_t* info // optional. can be NULL:
+) {
+  MiscUtils::AutoCritSect lck(&g_cs);
+  if (!g_first) {
+    reload_table();
+  }
+  const entry_t* const e = find_entry_for_data_address(p);
+  if (e) {
+    if (info) {
+      *info = e->info;
+    }
+    return true;
+  }
+  return false;
+}
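The rewritten module table above keeps entries in a singly linked list sorted by text address and inserts with a walk-and-splice, which keeps lookups simple and avoids the old fixed-size MAX_MODULES array. A self-contained sketch of that insertion (Entry and add_sorted are hypothetical stand-ins for entry_t and add_entry_to_list):

#include <cstdio>

struct Entry {
  Entry* next;
  unsigned long text;  // sort key: module text start address
};

static void add_sorted(Entry* e, Entry** start) {
  Entry* last = nullptr;
  Entry* cur = *start;
  while (cur && cur->text < e->text) {  // find first node not below e
    last = cur;
    cur = cur->next;
  }
  if (last) {
    last->next = e;   // splice into the middle or at the end
  } else {
    *start = e;       // new head
  }
  e->next = cur;
}

int main() {
  Entry a = {nullptr, 0x3000}, b = {nullptr, 0x1000}, c = {nullptr, 0x2000};
  Entry* head = nullptr;
  add_sorted(&a, &head); add_sorted(&b, &head); add_sorted(&c, &head);
  for (Entry* e = head; e; e = e->next) std::printf("%lx\n", e->text);
  return 0;
}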
@@ -26,73 +26,47 @@
 // Loadlib_aix.cpp contains support code for analysing the memory
 // layout of loaded binaries in ones own process space.
 //
-// It is needed, among other things, to provide a dladdr() emulation, because
-// that one is not provided by AIX
+// It is needed, among other things, to provide dladdr(3), which is
+// missing on AIX.

 #ifndef OS_AIX_VM_LOADLIB_AIX_HPP
 #define OS_AIX_VM_LOADLIB_AIX_HPP

+#include <stddef.h>
+
 class outputStream;

-// This class holds information about a single loaded library module.
+// Struct holds information about a single loaded library module.
 // Note that on AIX, a single library can be spread over multiple
-// uintptr_t range on a module base, eg.
+// uintptr_t ranges on a module base, eg.
 // libC.a(shr3_64.o) or libC.a(shrcore_64.o).
-class LoadedLibraryModule {
-
-    friend class LoadedLibraries;
+// Note: all pointers to strings (path, member) point to strings which are immortal.
+struct loaded_module_t {

-    char fullpath[512];  // eg /usr/lib/libC.a
-    char shortname[30];  // eg libC.a
-    char membername[30]; // eg shrcore_64.o
-    const unsigned char* text_from;
-    const unsigned char* text_to;
-    const unsigned char* data_from;
-    const unsigned char* data_to;
+  // Points to the full path of the loaded module, e.g.
+  // "/usr/lib/libC.a".
+  const char* path;

-  public:
+  // Host library name without path
+  const char* shortname;

-    const char* get_fullpath() const {
-      return fullpath;
-    }
-    const char* get_shortname() const {
-      return shortname;
-    }
-    const char* get_membername() const {
-      return membername;
-    }
+  // Points to the object file (AIX specific stuff)
+  // e.g "shrcore_64.o".
+  const char* member;

-    // text_from, text_to: returns the range of the text (code)
-    // segment for that module
-    const unsigned char* get_text_from() const {
-      return text_from;
-    }
-    const unsigned char* get_text_to() const {
-      return text_to;
-    }
+  // Text area from, to
+  const void* text;
+  size_t text_len;

-    // data_from/data_to: returns the range of the data
-    // segment for that module
-    const unsigned char* get_data_from() const {
-      return data_from;
-    }
-    const unsigned char* get_data_to() const {
-      return data_to;
-    }
+  // Data area from, to
+  const void* data;
+  size_t data_len;

-    // returns true if the
-    bool is_in_text(const unsigned char* p) const {
-      return p >= text_from && p < text_to ? true : false;
-    }
+  // True if this module is part of the vm.
+  bool is_in_vm;

-    bool is_in_data(const unsigned char* p) const {
-      return p >= data_from && p < data_to ? true : false;
-    }
-
-    // output debug info
-    void print(outputStream* os) const;
-
-}; // end LoadedLibraryModule
+};

 // This class is a singleton holding a map of all loaded binaries
 // in the AIX process space.
@@ -100,29 +74,31 @@ class LoadedLibraries
 // : AllStatic (including allocation.hpp just for AllStatic is overkill.)
 {

-  private:
-
-    enum {MAX_MODULES = 100};
-    static LoadedLibraryModule tab[MAX_MODULES];
-    static int num_loaded;
-
   public:

-    // rebuild the internal table of LoadedLibraryModule objects
-    static void reload();
+    // Rebuild the internal module table. If an error occurs, internal module
+    // table remains untouched.
+    static bool reload();

-    // checks whether the address p points to any of the loaded code segments.
-    // If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
-    static const LoadedLibraryModule* find_for_text_address(const unsigned char* p);
+    // Check whether the given address points into the text segment of a
+    // loaded module. Return true if this is the case.
+    // Optionally, information about the module is returned (info)
+    static bool find_for_text_address (
+      const void* p,
+      loaded_module_t* info // Optional, leave NULL if not needed.
+    );

-    // checks whether the address p points to any of the loaded data segments.
-    // If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
-    static const LoadedLibraryModule* find_for_data_address(const unsigned char* p);
+    // Check whether the given address points into the data segment of a
+    // loaded module. Return true if this is the case.
+    // Optionally, information about the module is returned (info)
+    static bool find_for_data_address (
+      const void* p,
+      loaded_module_t* info // Optional, leave NULL if not needed.
+    );

-    // output debug info
+    // Output debug info
     static void print(outputStream* os);

-}; // end LoadedLibraries
+};

 #endif // OS_AIX_VM_LOADLIB_AIX_HPP
hotspot/src/os/aix/vm/misc_aix.cpp (new file, 61 lines)
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "misc_aix.hpp"
+#include "runtime/stubRoutines.hpp"
+
+#include <pthread.h>
+
+void MiscUtils::init_critsect(MiscUtils::critsect_t* cs) {
+  const int rc = pthread_mutex_init(cs, NULL);
+  assert0(rc == 0);
+}
+
+void MiscUtils::free_critsect(MiscUtils::critsect_t* cs) {
+  const int rc = pthread_mutex_destroy(cs);
+  assert0(rc == 0);
+}
+
+void MiscUtils::enter_critsect(MiscUtils::critsect_t* cs) {
+  const int rc = pthread_mutex_lock(cs);
+  assert0(rc == 0);
+}
+
+void MiscUtils::leave_critsect(MiscUtils::critsect_t* cs) {
+  const int rc = pthread_mutex_unlock(cs);
+  assert0(rc == 0);
+}
+
+bool MiscUtils::is_readable_pointer(const void* p) {
+  if (!CanUseSafeFetch32()) {
+    return true;
+  }
+  int* const aligned = (int*) align_size_down((intptr_t)p, 4);
+  int cafebabe = 0xcafebabe;
+  int deadbeef = 0xdeadbeef;
+  return (SafeFetch32(aligned, cafebabe) != cafebabe) ||
+         (SafeFetch32(aligned, deadbeef) != deadbeef);
+}
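is_readable_pointer() above uses a neat trick: probe the address twice with two different sentinel values. Only if both probes come back as their own sentinel is the memory considered unreadable, since one location cannot genuinely hold both values. A sketch of that logic (safe_fetch32 here is a trivial stand-in; HotSpot's real SafeFetch32 is a stub that traps the access fault and returns the sentinel, which plain C++ cannot express portably):

#include <cstdio>

static int safe_fetch32(int* adr, int errValue) {
  (void)errValue;
  // Stand-in only: the real implementation catches the access fault and
  // returns errValue for unmapped addresses instead of crashing.
  return *adr;
}

static bool is_readable(int* p) {
  const int cafebabe = 0xcafebabe;
  const int deadbeef = 0xdeadbeef;
  return (safe_fetch32(p, cafebabe) != cafebabe) ||
         (safe_fetch32(p, deadbeef) != deadbeef);
}

int main() {
  int x = 42;
  std::printf("readable: %d\n", is_readable(&x));
  return 0;
}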
hotspot/src/os/aix/vm/misc_aix.hpp (new file, 101 lines)
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012, 2015 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef OS_AIX_VM_MISC_AIX_HPP
+#define OS_AIX_VM_MISC_AIX_HPP
+
+// misc_aix.hpp, misc_aix.cpp: convenience functions needed for the OpenJDK AIX
+// port.
+#include "utilities/globalDefinitions.hpp"
+
+#include <pthread.h>
+
+// Trace if verbose to tty.
+#define trcVerbose(fmt, ...) { \
+  if (Verbose) { \
+    fprintf(stderr, fmt, ##__VA_ARGS__); \
+    fputc('\n', stderr); fflush(stderr); \
+  } \
+}
+#define ERRBYE(s) { trcVerbose(s); return -1; }
+#define trc(fmt, ...)
+
+#define assert0(b) assert((b), "")
+#define guarantee0(b) guarantee((b), "")
+template <class T1, class T2> bool is_aligned_to(T1 what, T2 alignment) {
+  return (((uintx)(what)) & (((uintx)(alignment)) - 1)) == 0 ? true : false;
+}
+
+// CritSect: simple critical section implementation using pthread mutexes.
+namespace MiscUtils {
+  typedef pthread_mutex_t critsect_t;
+
+  void init_critsect(MiscUtils::critsect_t* cs);
+  void free_critsect(MiscUtils::critsect_t* cs);
+  void enter_critsect(MiscUtils::critsect_t* cs);
+  void leave_critsect(MiscUtils::critsect_t* cs);
+
+  // Need to wrap this in an object because we need to dynamically initialize
+  // critical section (because of windows, where there is no way to initialize
+  // a CRITICAL_SECTION statically. On Unix, we could use
+  // PTHREAD_MUTEX_INITIALIZER).
+
+  // Note: The critical section does NOT get cleaned up in the destructor. That is
+  // by design: the CritSect class is only ever used as global objects whose
+  // lifetime spans the whole VM life; in that context we don't want the lock to
+  // be cleaned up when global C++ objects are destroyed, but to continue to work
+  // correctly right to the very end of the process life.
+  class CritSect {
+    critsect_t _cs;
+   public:
+    CritSect() { init_critsect(&_cs); }
+    //~CritSect() { free_critsect(&_cs); }
+    void enter() { enter_critsect(&_cs); }
+    void leave() { leave_critsect(&_cs); }
+  };
+
+  class AutoCritSect {
+    CritSect* const _pcsobj;
+   public:
+    AutoCritSect(CritSect* pcsobj)
+      : _pcsobj(pcsobj)
+    {
+      _pcsobj->enter();
+    }
+    ~AutoCritSect() {
+      _pcsobj->leave();
+    }
+  };
+
+  // Returns true if pointer can be dereferenced without triggering a segment
+  // violation. Returns false if pointer is invalid.
+  // Note: Depends on stub routines; prior to stub routine generation, will
+  // always return true. Use CanUseSafeFetch32 to handle this case.
+  bool is_readable_pointer(const void* p);
+
+}
+
+#endif // OS_AIX_VM_MISC_AIX_HPP
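MiscUtils::AutoCritSect above is a classic RAII guard: the constructor takes the lock, the destructor releases it, so every return path unlocks automatically. The same shape in standard C++, using std::mutex instead of pthreads (names chosen to echo the patch, not taken from it):

#include <cstdio>
#include <mutex>

static std::mutex g_cs;          // analogous to the file-scope CritSect g_cs
static int g_table_version = 0;

static void reload_table_locked() {
  std::lock_guard<std::mutex> lck(g_cs);  // enter() now, leave() on scope exit
  ++g_table_version;                      // mutate shared state safely
  std::printf("table version %d\n", g_table_version);
}                                         // unlocked here on every path

int main() {
  reload_table_locked();
  return 0;
}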
@ -40,6 +40,7 @@
|
||||
#include "loadlib_aix.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/filemap.hpp"
|
||||
#include "misc_aix.hpp"
|
||||
#include "mutex_aix.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "os_aix.inline.hpp"
|
||||
@ -159,23 +160,10 @@ typedef stackslot_t* stackptr_t;
|
||||
#define PV_8_Compat 0x308000 /* Power PC 8 */
|
||||
#endif
|
||||
|
||||
#define trcVerbose(fmt, ...) { /* PPC port */ \
|
||||
if (Verbose) { \
|
||||
fprintf(stderr, fmt, ##__VA_ARGS__); \
|
||||
fputc('\n', stderr); fflush(stderr); \
|
||||
} \
|
||||
}
|
||||
#define trc(fmt, ...) /* PPC port */
|
||||
|
||||
#define ERRBYE(s) { \
|
||||
trcVerbose(s); \
|
||||
return -1; \
|
||||
}
|
||||
|
||||
// Query dimensions of the stack of the calling thread.
|
||||
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
|
||||
|
||||
// function to check a given stack pointer against given stack limits
|
||||
// Function to check a given stack pointer against given stack limits.
|
||||
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
|
||||
if (((uintptr_t)sp) & 0x7) {
|
||||
return false;
|
||||
@ -189,7 +177,7 @@ inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t s
|
||||
return true;
|
||||
}
|
||||
|
||||
// returns true if function is a valid codepointer
|
||||
// Returns true if function is a valid codepointer.
|
||||
inline bool is_valid_codepointer(codeptr_t p) {
|
||||
if (!p) {
|
||||
return false;
|
||||
@ -197,7 +185,7 @@ inline bool is_valid_codepointer(codeptr_t p) {
|
||||
if (((uintptr_t)p) & 0x3) {
|
||||
return false;
|
||||
}
|
||||
if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
|
||||
if (!LoadedLibraries::find_for_text_address(p, NULL)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -1387,26 +1375,15 @@ bool os::address_is_in_vm(address addr) {
|
||||
|
||||
// Input could be a real pc or a function pointer literal. The latter
|
||||
// would be a function descriptor residing in the data segment of a module.
|
||||
|
||||
const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
|
||||
if (lib) {
|
||||
if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
loaded_module_t lm;
|
||||
if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
|
||||
return lm.is_in_vm;
|
||||
} else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
|
||||
return lm.is_in_vm;
|
||||
} else {
|
||||
lib = LoadedLibraries::find_for_data_address(addr);
|
||||
if (lib) {
|
||||
if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Resolve an AIX function descriptor literal to a code pointer.
|
||||
@ -1418,21 +1395,18 @@ bool os::address_is_in_vm(address addr) {
|
||||
// NULL is returned.
|
||||
static address resolve_function_descriptor_to_code_pointer(address p) {
|
||||
|
||||
const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
|
||||
if (lib) {
|
||||
// its a real code pointer
|
||||
if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
|
||||
// It is a real code pointer.
|
||||
return p;
|
||||
} else {
|
||||
lib = LoadedLibraries::find_for_data_address(p);
|
||||
if (lib) {
|
||||
// pointer to data segment, potential function descriptor
|
||||
address code_entry = (address)(((FunctionDescriptor*)p)->entry());
|
||||
if (LoadedLibraries::find_for_text_address(code_entry)) {
|
||||
// Its a function descriptor
|
||||
return code_entry;
|
||||
}
|
||||
} else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
|
||||
// Pointer to data segment, potential function descriptor.
|
||||
address code_entry = (address)(((FunctionDescriptor*)p)->entry());
|
||||
if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
|
||||
// It is a function descriptor.
|
||||
return code_entry;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -1461,7 +1435,6 @@ static int getModuleName(codeptr_t pc, // [in] program counte
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
) {

// initialize output parameters
if (p_name && namelen > 0) {
*p_name = '\0';
}
@ -1469,16 +1442,15 @@ static int getModuleName(codeptr_t pc, // [in] program counte
*p_errmsg = '\0';
}

const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
if (lib) {
if (p_name && namelen > 0) {
sprintf(p_name, "%.*s", namelen, lib->get_shortname());
if (p_name && namelen > 0) {
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
strncpy(p_name, lm.shortname, namelen);
p_name[namelen - 1] = '\0';
}
return 0;
}

trcVerbose("pc outside any module");

return -1;
}
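The replacement of sprintf("%.*s", ...) with strncpy plus an explicit terminator matters for two reasons: %.*s expects an int precision while namelen is a size_t, and strncpy does not NUL-terminate when the source is too long. A hedged sketch of the safe pattern; the helper name is illustrative:

#include <cstring>

// Copy a short name into a caller-supplied buffer, always NUL-terminated.
// strncpy() pads short strings but does not terminate long ones, so the
// explicit terminator on the final byte is mandatory.
static void copy_name(char* dst, size_t dstlen, const char* src) {
  if (dst == NULL || dstlen == 0) return;
  strncpy(dst, src, dstlen);
  dst[dstlen - 1] = '\0';
}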
@ -1690,7 +1662,6 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
print_signal_handler(st, SIGPIPE, buf, buflen);
print_signal_handler(st, SIGXFSZ, buf, buflen);
print_signal_handler(st, SIGILL , buf, buflen);
print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
print_signal_handler(st, SR_signum, buf, buflen);
print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
@ -3309,7 +3280,6 @@ void os::run_periodic_checks() {
}

DO_SIGNAL_CHECK(SR_signum);
DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
@ -3351,10 +3321,6 @@ void os::Aix::check_signal_handler(int sig) {
jvmHandler = (address)user_handler();
break;

case INTERRUPT_SIGNAL:
jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
break;

default:
if (sig == SR_signum) {
jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
@ -3787,18 +3753,11 @@ bool os::find(address addr, outputStream* st) {

st->print(PTR_FORMAT ": ", addr);

const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
if (lib) {
lib->print(st);
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
st->print("%s", lm.path);
return true;
} else {
lib = LoadedLibraries::find_for_data_address(addr);
if (lib) {
lib->print(st);
return true;
} else {
st->print_cr("(outside any module)");
}
}

return false;
@ -3965,9 +3924,6 @@ int os::available(int fd, jlong *bytes) {
if (::fstat64(fd, &buf64) >= 0) {
mode = buf64.st_mode;
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
// XXX: is the following call interruptible? If so, this might
// need to go through the INTERRUPT_IO() wrapper as for other
// blocking, interruptible calls in this file.
int n;
if (::ioctl(fd, FIONREAD, &n) >= 0) {
*bytes = n;
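The os::available() hunks above fall back to ioctl(FIONREAD) for character devices, FIFOs, and sockets, where fstat() sizes are meaningless. A self-contained sketch of that query, assuming only POSIX headers and no HotSpot types:

#include <sys/ioctl.h>
#include <unistd.h>

// Ask the kernel how many bytes can be read without blocking. Regular
// files are handled separately (size minus current offset) by the caller.
static bool bytes_available(int fd, long* bytes) {
  int n = 0;
  if (ioctl(fd, FIONREAD, &n) < 0) {
    return false;
  }
  *bytes = n;
  return true;
}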
@ -201,6 +201,7 @@ static pid_t filename_to_pid(const char* filename) {
// the backing store files. Returns true if the directory is considered
// a secure location. Returns false if the statbuf is a symbolic link or
// if an error occurred.
//
static bool is_statbuf_secure(struct stat *statp) {
if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
// The path represents a link or some non-directory file type,
@ -209,15 +210,18 @@ static bool is_statbuf_secure(struct stat *statp) {
return false;
}
// We have an existing directory, check if the permissions are safe.
//
if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
// The directory is open for writing and could be subjected
// to a symlink or a hard link attack. Declare it insecure.
//
return false;
}
// See if the uid of the directory matches the effective uid of the process.
//
if (statp->st_uid != geteuid()) {
// If user is not root then see if the uid of the directory matches the effective uid of the process.
uid_t euid = geteuid();
if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;
}
return true;
@ -228,6 +232,7 @@ static bool is_statbuf_secure(struct stat *statp) {
// the backing store files. Returns true if the directory exists
// and is considered a secure location. Returns false if the path
// is a symbolic link or if an error occurred.
//
static bool is_directory_secure(const char* path) {
struct stat statbuf;
int result = 0;
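Condensed, the new security test accepts a directory only if it is a real directory (not a symlink), is not group/other-writable, and is owned by the effective user -- unless that user is root, which is the case this change adds. A standalone sketch of the combined predicate; it mirrors the hunk but is not the literal HotSpot function:

#include <sys/stat.h>
#include <unistd.h>

static bool statbuf_is_secure(const struct stat* sb) {
  if (S_ISLNK(sb->st_mode) || !S_ISDIR(sb->st_mode)) {
    return false;                        // a link or not a directory
  }
  if ((sb->st_mode & (S_IWGRP | S_IWOTH)) != 0) {
    return false;                        // others could plant entries
  }
  uid_t euid = geteuid();
  if (euid != 0 && sb->st_uid != euid) {
    return false;                        // created by a different user
  }
  return true;
}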
@ -23,11 +23,13 @@
*/

#include "asm/assembler.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "loadlib_aix.hpp"
// For CritSect
#include "misc_aix.hpp"
#include "porting_aix.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"

#include <demangle.h>
@ -45,23 +47,6 @@

#define PTRDIFF_BYTES(p1,p2) (((ptrdiff_t)p1) - ((ptrdiff_t)p2))

// Align a pointer without having to cast.
inline char* align_ptr_up(char* ptr, intptr_t alignment) {
return (char*) align_size_up((intptr_t)ptr, alignment);
}

// Trace if verbose to tty.
// I use these now instead of the Xtrace system because the latter is
// not available at init time, hence worthless. Until we fix this, all
// tracing here is done with -XX:+Verbose.
#define trcVerbose(fmt, ...) { \
if (Verbose) { \
fprintf(stderr, fmt, ##__VA_ARGS__); \
fputc('\n', stderr); fflush(stderr); \
} \
}
#define ERRBYE(s) { trcVerbose(s); return -1; }

// Unfortunately, the interface of dladdr makes the implementor
// responsible for maintaining memory for function name/library
// name. I guess this is because most OS's keep those values as part
@ -139,18 +124,37 @@ extern "C" int getFuncName(
ERRBYE("invalid program counter");
}

// We see random but frequent crashes in this function since some months, mainly on shutdown
// (-XX:+DumpInfoAtExit). It appears the page we are reading is randomly disappearing while
// we read it (?).
// As the pc cannot be trusted to be anything sensible, let's make all reads via SafeFetch. Also
// bail if this is not a text address right now.
if (!LoadedLibraries::find_for_text_address(pc, NULL)) {
ERRBYE("not a text address");
}

// .. (Note that is_readable_pointer returns true if safefetch stubs are not there yet;
// in that case I try reading the traceback table unsafe - I rather risk secondary crashes in
// error files than not having a callstack.)
#define CHECK_POINTER_READABLE(p) \
if (!MiscUtils::is_readable_pointer(p)) { \
ERRBYE("pc not readable"); \
}

codeptr_t pc2 = pc;

// make sure the pointer is word aligned.
// Make sure the pointer is word aligned.
pc2 = (codeptr_t) align_ptr_up((char*)pc2, 4);
CHECK_POINTER_READABLE(pc2)

// Find start of traceback table.
// (starts after code, is marked by word-aligned (32bit) zeros)
while ((*pc2 != NULL) && (searchcount++ < MAX_FUNC_SEARCH_LEN)) {
CHECK_POINTER_READABLE(pc2)
pc2++;
}
if (*pc2 != 0) {
ERRBYE("could not find traceback table within 5000 bytes of program counter");
ERRBYE("no traceback table found");
}
//
// Set up addressability to the traceback table
@ -162,7 +166,7 @@ extern "C" int getFuncName(
if (tb->tb.lang >= 0xf && tb->tb.lang <= 0xfb) {
// Language specifiers, go from 0 (C) to 14 (Objective C).
// According to spec, 0xf-0xfa reserved, 0xfb-0xff reserved for ibm.
ERRBYE("not a traceback table");
ERRBYE("no traceback table found");
}

// Existence of fields in the tbtable extension are contingent upon
@ -173,6 +177,8 @@ extern "C" int getFuncName(
if (tb->tb.fixedparms != 0 || tb->tb.floatparms != 0)
pc2++;

CHECK_POINTER_READABLE(pc2)

if (tb->tb.has_tboff == TRUE) {

// I want to know the displacement
@ -182,7 +188,7 @@ extern "C" int getFuncName(

// Weed out the cases where we did find the wrong traceback table.
if (pc < start_of_procedure) {
ERRBYE("could not find (the real) traceback table within 5000 bytes of program counter");
ERRBYE("no traceback table found");
}

// return the displacement
@ -204,15 +210,24 @@ extern "C" int getFuncName(
if (tb->tb.has_ctl == TRUE)
pc2 += (*pc2) + 1; // don't care

CHECK_POINTER_READABLE(pc2)

//
// return function name if it exists.
//
if (p_name && namelen > 0) {
if (tb->tb.name_present) {
// Copy name from text because it may not be zero terminated.
// 256 is good enough for most cases; do not use large buffers here.
char buf[256];
const short l = MIN2<short>(*((short*)pc2), sizeof(buf) - 1);
memcpy(buf, (char*)pc2 + sizeof(short), l);
buf[l] = '\0';
// Be very careful.
int i = 0; char* const p = (char*)pc2 + sizeof(short);
while (i < l && MiscUtils::is_readable_pointer(p + i)) {
buf[i] = p[i];
i++;
}
buf[i] = '\0';

p_name[0] = '\0';
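Every read in the rewritten getFuncName goes through MiscUtils::is_readable_pointer (built on the SafeFetch stubs) because the page holding the traceback table can disappear mid-read. The byte-by-byte copy added above follows this pattern; a reduced sketch with a stubbed probe:

#include <cstddef>

// Placeholder for MiscUtils::is_readable_pointer(); the VM version probes
// the address via the SafeFetch stubs and never faults.
static bool is_readable(const void* p) {
  return p != NULL;  // stub: always "readable" for illustration
}

// Copy at most len bytes, stopping as soon as a byte becomes unreadable.
// dst must hold len + 1 bytes and is always NUL-terminated; returns the
// number of bytes actually copied.
static size_t guarded_copy(char* dst, const char* src, size_t len) {
  size_t i = 0;
  while (i < len && is_readable(src + i)) {
    dst[i] = src[i];
    i++;
  }
  dst[i] = '\0';
  return i;
}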
@ -275,7 +290,8 @@ int dladdr(void* addr, Dl_info* info) {
info->dli_saddr = NULL;

address p = (address) addr;
const LoadedLibraryModule* lib = NULL;
loaded_module_t lm;
bool found = false;

enum { noclue, code, data } type = noclue;

@ -284,28 +300,28 @@ int dladdr(void* addr, Dl_info* info) {
// Note: input address may be a function. I accept both a pointer to
// the entry of a function and a pointer to the function descriptor.
// (see ppc64 ABI)
lib = LoadedLibraries::find_for_text_address(p);
if (lib) {
found = LoadedLibraries::find_for_text_address(p, &lm);
if (found) {
type = code;
}

if (!lib) {
if (!found) {
// Not a pointer into any text segment. Is it a function descriptor?
const FunctionDescriptor* const pfd = (const FunctionDescriptor*) p;
p = pfd->entry();
if (p) {
lib = LoadedLibraries::find_for_text_address(p);
if (lib) {
found = LoadedLibraries::find_for_text_address(p, &lm);
if (found) {
type = code;
}
}
}

if (!lib) {
if (!found) {
// Neither direct code pointer nor function descriptor. A data ptr?
p = (address)addr;
lib = LoadedLibraries::find_for_data_address(p);
if (lib) {
found = LoadedLibraries::find_for_data_address(p, &lm);
if (found) {
type = data;
}
}
@ -313,12 +329,10 @@ int dladdr(void* addr, Dl_info* info) {
// If we did find the shared library this address belongs to (either
// code or data segment) resolve library path and, if possible, the
// symbol name.
if (lib) {
const char* const interned_libpath =
dladdr_fixed_strings.intern(lib->get_fullpath());
if (interned_libpath) {
info->dli_fname = interned_libpath;
}
if (found) {

// No need to intern the libpath, that one is already interned one layer below.
info->dli_fname = lm.path;

if (type == code) {

@ -328,7 +342,7 @@ int dladdr(void* addr, Dl_info* info) {
int displacement = 0;

if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
NULL, NULL, 0, true /* demangle */) == 0) {
NULL, NULL, 0, false) == 0) {
if (funcname[0] != '\0') {
const char* const interned = dladdr_fixed_strings.intern(funcname);
info->dli_sname = interned;
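dladdr has no way to hand back ownership of strings, so the port keeps returned names in a process-lifetime intern table (dladdr_fixed_strings above). A toy version of the idea -- far smaller and unlocked compared to whatever the VM actually uses:

#include <cstring>
#include <cstdlib>

// Return a stable copy of s, reusing an existing entry when the same
// string was interned before. Entries intentionally live forever.
static const char* intern(const char* s) {
  enum { capacity = 64 };
  static char* table[capacity];
  static int used = 0;
  for (int i = 0; i < used; i++) {
    if (strcmp(table[i], s) == 0) {
      return table[i];                  // already interned
    }
  }
  if (used == capacity) {
    return NULL;                        // table full; caller must cope
  }
  table[used] = strdup(s);
  return table[used++];
}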
@ -27,13 +27,6 @@

#include <stddef.h>

// PPC port only:
#define assert0(b) assert( (b), "" )
#define guarantee0(b) assert( (b), "" )
template <class T1, class T2> bool is_aligned_to(T1 what, T2 alignment) {
return ( ((uintx)(what)) & (((uintx)(alignment)) - 1) ) == 0 ? true : false;
}

// Header file to contain porting-relevant code which does not have a
// home anywhere else and which can not go into os_<platform>.h because
// that header is included inside the os class definition, hence all
@ -68,14 +61,10 @@ extern "C"
#endif
int dladdr(void *addr, Dl_info *info);


// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (eg getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (eg os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

struct tbtable;

// helper function - given a program counter, tries to locate the traceback table and
// returns info from it (like, most importantly, function name, displacement of the
// pc inside the function, and the traceback table itself.
@ -87,65 +76,9 @@ int getFuncName(
char* p_name, size_t namelen, // [out] optional: user provided buffer for the function name
int* p_displacement, // [out] optional: displacement
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
bool demangle = true // [in] whether to demangle the name
char* p_errmsg, size_t errmsglen, // [out] optional: user provided buffer for error messages
bool demangle // [in] whether to demangle the name
);

// -------------------------------------------------------------------------

// A simple critical section which shall be based upon OS critical
// sections (CRITICAL_SECTION resp. Posix Mutex) and nothing else.

#include <pthread.h>

namespace MiscUtils {
typedef pthread_mutex_t critsect_t;

inline void init_critsect(MiscUtils::critsect_t* cs) {
pthread_mutex_init(cs, NULL);
}
inline void free_critsect(MiscUtils::critsect_t* cs) {
pthread_mutex_destroy(cs);
}
inline void enter_critsect(MiscUtils::critsect_t* cs) {
pthread_mutex_lock(cs);
}
inline void leave_critsect(MiscUtils::critsect_t* cs) {
pthread_mutex_unlock(cs);
}

// Need to wrap this in an object because we need to dynamically initialize
// critical section (because of windows, where there is no way to initialize
// a CRITICAL_SECTION statically. On Unix, we could use
// PTHREAD_MUTEX_INITIALIZER)

// Note: The critical section does NOT get cleaned up in the destructor. That is
// by design: the CritSect class is only ever used as global objects whose
// lifetime spans the whole VM life; in that context we don't want the lock to
// be cleaned up when global C++ objects are destroyed, but to continue to work
// correctly right to the very end of the process life.
class CritSect {
critsect_t _cs;
public:
CritSect() { init_critsect(&_cs); }
//~CritSect() { free_critsect(&_cs); }
void enter() { enter_critsect(&_cs); }
void leave() { leave_critsect(&_cs); }
};

class AutoCritSect {
CritSect* const _pcsobj;
public:
AutoCritSect(CritSect* pcsobj)
: _pcsobj(pcsobj)
{
_pcsobj->enter();
}
~AutoCritSect() {
_pcsobj->leave();
}
};

}

#endif // OS_AIX_VM_PORTING_AIX_HPP
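The CritSect/AutoCritSect pair being moved out of this header (to misc_aix.hpp, per the includes earlier in this commit) is used RAII-style: construct the guard on the stack and the lock is released at scope exit. A short usage sketch, assuming misc_aix.hpp is included:

// Global lock; deliberately never destroyed (see the comment above about
// the lock having to keep working until the very end of the process).
static MiscUtils::CritSect g_table_lock;
static int g_counter = 0;

int bump_counter() {
  MiscUtils::AutoCritSect guard(&g_table_lock);  // enter() here
  return ++g_counter;                            // leave() at scope exit
}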
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,6 @@ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
: handler;
switch (sig) {
/* The following are already used by the VM. */
case INTERRUPT_SIGNAL:
case SIGFPE:
case SIGILL:
case SIGSEGV:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,6 @@
/* Signal definitions */

#define BREAK_SIGNAL SIGQUIT /* Thread dumping support. */
#define INTERRUPT_SIGNAL SIGUSR1 /* Interruptible I/O support. */
#define SHUTDOWN1_SIGNAL SIGHUP /* Shutdown Hooks support. */
#define SHUTDOWN2_SIGNAL SIGINT
#define SHUTDOWN3_SIGNAL SIGTERM

@ -1777,7 +1777,6 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
print_signal_handler(st, SIGPIPE, buf, buflen);
print_signal_handler(st, SIGXFSZ, buf, buflen);
print_signal_handler(st, SIGILL , buf, buflen);
print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
print_signal_handler(st, SR_signum, buf, buflen);
print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
@ -3345,7 +3344,6 @@ void os::run_periodic_checks() {
}

DO_SIGNAL_CHECK(SR_signum);
DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
@ -3391,10 +3389,6 @@ void os::Bsd::check_signal_handler(int sig) {
jvmHandler = (address)user_handler();
break;

case INTERRUPT_SIGNAL:
jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
break;

default:
if (sig == SR_signum) {
jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
@ -3913,9 +3907,6 @@ int os::available(int fd, jlong *bytes) {
if (::fstat(fd, &buf) >= 0) {
mode = buf.st_mode;
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
// XXX: is the following call interruptible? If so, this might
// need to go through the INTERRUPT_IO() wrapper as for other
// blocking, interruptible calls in this file.
int n;
if (::ioctl(fd, FIONREAD, &n) >= 0) {
*bytes = n;

@ -217,9 +217,9 @@ static bool is_statbuf_secure(struct stat *statp) {
//
return false;
}
// See if the uid of the directory matches the effective uid of the process.
//
if (statp->st_uid != geteuid()) {
// If user is not root then see if the uid of the directory matches the effective uid of the process.
uid_t euid = geteuid();
if ((euid != 0) && (statp->st_uid != euid)) {
// The directory was not created by this user, declare it insecure.
//
return false;

@ -254,6 +254,8 @@ LinuxAttachOperation* LinuxAttachListener::read_request(int s) {
do {
int n;
RESTARTABLE(read(s, buf+off, left), n);
assert(n <= left, "buffer was too small, impossible!");
buf[max_len - 1] = '\0';
if (n == -1) {
return NULL; // reset by peer or other error
}
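The fix above NUL-terminates the request buffer before any string parsing; the RESTARTABLE macro wrapping the read retries on EINTR. Both patterns in one self-contained sketch (the macro shape matches HotSpot's; the helper is illustrative):

#include <cerrno>
#include <unistd.h>

// Retry a syscall while it fails with EINTR.
#define RESTARTABLE(_cmd, _result) do { \
  _result = _cmd;                       \
} while (_result == (ssize_t)-1 && errno == EINTR)

// Read up to buflen-1 bytes and guarantee the result is a proper
// C string, so later parsing cannot run off the end of the buffer.
static ssize_t read_cstring(int fd, char* buf, size_t buflen) {
  ssize_t n;
  RESTARTABLE(read(fd, buf, buflen - 1), n);
  buf[n > 0 ? n : 0] = '\0';
  return n;
}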
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,6 @@ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
: handler;
switch (sig) {
/* The following are already used by the VM. */
case INTERRUPT_SIGNAL:
case SIGFPE:
case SIGILL:
case SIGSEGV:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -88,7 +88,6 @@
/* Signal definitions */

#define BREAK_SIGNAL SIGQUIT /* Thread dumping support. */
#define INTERRUPT_SIGNAL SIGUSR1 /* Interruptible I/O support. */
#define SHUTDOWN1_SIGNAL SIGHUP /* Shutdown Hooks support. */
#define SHUTDOWN2_SIGNAL SIGINT
#define SHUTDOWN3_SIGNAL SIGTERM

@ -2302,7 +2302,6 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
print_signal_handler(st, SIGPIPE, buf, buflen);
print_signal_handler(st, SIGXFSZ, buf, buflen);
print_signal_handler(st, SIGILL , buf, buflen);
print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
print_signal_handler(st, SR_signum, buf, buflen);
print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
@ -2820,7 +2819,6 @@ int os::Linux::sched_getcpu_syscall(void) {
// Something to do with the numa-aware allocator needs these symbols
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }


// If we are running with libnuma version > 2, then we should
@ -4254,7 +4252,9 @@ int os::Linux::get_our_sigflags(int sig) {

void os::Linux::set_our_sigflags(int sig, int flags) {
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
sigflags[sig] = flags;
if (sig > 0 && sig < MAXSIGNUM) {
sigflags[sig] = flags;
}
}

void os::Linux::set_signal_handler(int sig, bool set_installed) {
@ -4496,7 +4496,6 @@ void os::run_periodic_checks() {
}

DO_SIGNAL_CHECK(SR_signum);
DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
@ -4542,10 +4541,6 @@ void os::Linux::check_signal_handler(int sig) {
jvmHandler = (address)user_handler();
break;

case INTERRUPT_SIGNAL:
jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
break;

default:
if (sig == SR_signum) {
jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
@ -5130,9 +5125,6 @@ int os::available(int fd, jlong *bytes) {
if (::fstat64(fd, &buf64) >= 0) {
mode = buf64.st_mode;
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
// XXX: is the following call interruptible? If so, this might
// need to go through the INTERRUPT_IO() wrapper as for other
// blocking, interruptible calls in this file.
int n;
if (::ioctl(fd, FIONREAD, &n) >= 0) {
*bytes = n;
@ -5937,22 +5929,20 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
char core_pattern[core_pattern_len] = {0};

int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
if (core_pattern_file != -1) {
ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
::close(core_pattern_file);

if (ret > 0) {
char *last_char = core_pattern + strlen(core_pattern) - 1;

if (*last_char == '\n') {
*last_char = '\0';
}
}
if (core_pattern_file == -1) {
return -1;
}

if (strlen(core_pattern) == 0) {
ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
::close(core_pattern_file);
if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {
return -1;
}
if (core_pattern[ret-1] == '\n') {
core_pattern[ret-1] = '\0';
} else {
core_pattern[ret] = '\0';
}

char *pid_pos = strstr(core_pattern, "%p");
int written;
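The rewritten core-pattern logic reads the file once and fails fast on every degenerate case (open failure, empty or oversized content, a lone newline) before stripping one trailing newline. The same flow as a standalone helper, a sketch rather than the HotSpot function itself:

#include <fcntl.h>
#include <unistd.h>

// Read /proc/sys/kernel/core_pattern into pattern[cap]. Returns true on
// success; mirrors the early-return structure of the hunk above.
static bool read_core_pattern(char* pattern, ssize_t cap) {
  int fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
  if (fd == -1) {
    return false;
  }
  ssize_t ret = read(fd, pattern, cap);
  close(fd);
  if (ret <= 0 || ret >= cap || pattern[0] == '\n') {
    return false;               // empty, truncated, or newline-only
  }
  if (pattern[ret - 1] == '\n') {
    pattern[ret - 1] = '\0';    // strip the single trailing newline
  } else {
    pattern[ret] = '\0';
  }
  return true;
}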
@ -177,6 +177,10 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
return aligned_base;
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
return vsnprintf(buf, len, fmt, args);
}

void os::Posix::print_load_average(outputStream* st) {
st->print("load average:");
double loadavg[3];

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,11 +73,6 @@ JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
if (os::Solaris::is_sig_ignored(sig)) return (void*)1;
}

/* Check parameterized signals. Don't allow sharing of our interrupt signal */
if (sig == os::Solaris::SIGinterrupt()) {
return (void *)-1;
}

void* oldHandler = os::signal(sig, newHandler);
if (oldHandler == os::user_handler()) {
return (void *)2;

@ -87,7 +87,6 @@
/* Signal definitions */

#define BREAK_SIGNAL SIGQUIT /* Thread dumping support. */
#define INTERRUPT_SIGNAL SIGUSR1 /* Interruptible I/O support. */
#define ASYNC_SIGNAL SIGUSR2 /* Watcher & async err support. */
#define SHUTDOWN1_SIGNAL SIGHUP /* Shutdown Hooks support. */
#define SHUTDOWN2_SIGNAL SIGINT
@ -95,8 +94,7 @@
/* alternative signals used with -XX:+UseAltSigs (or for backward
compatibility with 1.2, -Xusealtsigs) flag. Chosen to be
unlikely to conflict with applications embedding the vm */
#define ALT_INTERRUPT_SIGNAL (SIGRTMIN + SIGRTMAX)/2 /* alternate intio signal */
#define ALT_ASYNC_SIGNAL ALT_INTERRUPT_SIGNAL+1 /* alternate async signal */
#define ALT_ASYNC_SIGNAL (SIGRTMIN + SIGRTMAX)/2 /* alternate async signal */

/* With 1.4.1 libjsig added versioning: used in os_solaris.cpp and jsig.c */
#define JSIG_VERSION_1_4_1 0x30140100

@ -138,11 +138,6 @@
#define LGRP_RSRC_MEM 1 /* memory resources */
#endif

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
int prio_policy1[CriticalPriority+1] = {
-99999, 0, 16, 32, 48, 64,
@ -1003,8 +998,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,

// defined for >= Solaris 10. This allows builds on earlier versions
// of Solaris to take advantage of the newly reserved Solaris JVM signals
// With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
// and -XX:+UseAltSigs does nothing since these should have no conflict
// With SIGJVM1, SIGJVM2, ASYNC_SIGNAL is SIGJVM2 and -XX:+UseAltSigs does
// nothing since these should have no conflict. Previously INTERRUPT_SIGNAL
// was SIGJVM1.
//
#if !defined(SIGJVM1)
#define SIGJVM1 39
@ -1013,7 +1009,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;

int os::Solaris::_SIGasync = ASYNC_SIGNAL;

bool os::Solaris::is_sig_ignored(int sig) {
@ -1058,17 +1054,13 @@ void os::Solaris::signal_sets_init() {
sigaddset(&unblocked_sigs, SIGFPE);

if (isJVM1available) {
os::Solaris::set_SIGinterrupt(SIGJVM1);
os::Solaris::set_SIGasync(SIGJVM2);
} else if (UseAltSigs) {
os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
} else {
os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
os::Solaris::set_SIGasync(ASYNC_SIGNAL);
}

sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
sigaddset(&unblocked_sigs, os::Solaris::SIGasync());

if (!ReduceSignalUsage) {
@ -1939,8 +1931,6 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
static int Maxsignum = 0;
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);

int os::Solaris::get_our_sigflags(int sig) {
assert(ourSigFlags!=NULL, "signal data structure not initialized");
assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
@ -2005,8 +1995,7 @@ static void print_signal_handler(outputStream* st, int sig,
os::Posix::print_sa_flags(st, sa.sa_flags);

// Check: is it our handler?
if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
if (handler == CAST_FROM_FN_PTR(address, signalHandler)) {
// It is our signal handler
// check for flags
if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
@ -2026,13 +2015,11 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
print_signal_handler(st, SIGPIPE, buf, buflen);
print_signal_handler(st, SIGXFSZ, buf, buflen);
print_signal_handler(st, SIGILL , buf, buflen);
print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

@ -3146,7 +3133,7 @@ static int myMax = 0;
static int myCur = 0;
static bool priocntl_enable = false;

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static const int criticalPrio = FXCriticalPriority;
static int java_MaxPriority_to_os_priority = 0; // Saved mapping


@ -3796,7 +3783,6 @@ void os::os_exception_wrapper(java_call_t f, JavaValue* value,
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
@ -3818,20 +3804,6 @@ void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
errno = orig_errno;
}

// Do not delete - if guarantee is ever removed, a signal handler (even empty)
// is needed to provoke threads blocked on IO to return an EINTR
// Note: this explicitly does NOT call JVM_handle_solaris_signal and
// does NOT participate in signal chaining due to requirement for
// NOT setting SA_RESTART to make EINTR work.
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
if (UseSignalChaining) {
struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
if (actp && actp->sa_handler) {
vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
}
}
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;
@ -3969,13 +3941,6 @@ void os::Solaris::set_signal_handler(int sig, bool set_installed,
// not using stack banging
if (!UseStackBanging && sig == SIGSEGV) {
sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
} else if (sig == os::Solaris::SIGinterrupt()) {
// Interruptible i/o requires SA_RESTART cleared so EINTR
// is returned instead of restarting system calls
sigemptyset(&sigAct.sa_mask);
sigAct.sa_handler = NULL;
sigAct.sa_flags = SA_SIGINFO;
sigAct.sa_sigaction = sigINTRHandler;
} else {
sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
}
@ -4027,7 +3992,6 @@ void os::run_periodic_checks() {
}

// See comments above for using JVM1/JVM2 and UseAltSigs
DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}
@ -4072,12 +4036,9 @@ void os::Solaris::check_signal_handler(int sig) {
break;

default:
int intrsig = os::Solaris::SIGinterrupt();
int asynsig = os::Solaris::SIGasync();

if (sig == intrsig) {
jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
} else if (sig == asynsig) {
if (sig == asynsig) {
jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
} else {
return;
@ -4148,8 +4109,7 @@ void os::Solaris::install_signal_handlers() {
set_signal_handler(SIGFPE, true, true);


if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

if (os::Solaris::SIGasync() > OLDMAXSIGNUM) {
// Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
// can not register overridable signals which might be > 32
if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
@ -4159,8 +4119,6 @@ void os::Solaris::install_signal_handlers() {
}
}

// Never ok to chain our SIGinterrupt
set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
set_signal_handler(os::Solaris::SIGasync(), true, true);

if (libjsig_is_loaded && !libjsigdone) {
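The DO_SIGNAL_CHECK/check_signal_handler machinery that keeps appearing in these hunks compares the currently installed handler against the one the VM expects and warns when a library has silently replaced it. A simplified sketch of that comparison using plain sigaction(2); the bookkeeping of expected handlers is the VM's, and the names here are illustrative:

#include <csignal>
#include <cstdio>

// Warn if someone replaced the VM's handler for 'sig'. 'expected' is the
// sa_sigaction-style handler the VM installed earlier.
static void check_handler(int sig, void (*expected)(int, siginfo_t*, void*)) {
  struct sigaction act;
  if (sigaction(sig, /*act=*/NULL, /*oldact=*/&act) != 0) {
    return;                             // cannot query; nothing to do
  }
  void* current = (act.sa_flags & SA_SIGINFO)
                ? (void*)act.sa_sigaction
                : (void*)act.sa_handler;
  if (current != (void*)expected) {
    fprintf(stderr, "Warning: handler for signal %d was modified!\n", sig);
  }
}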
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,14 @@

// Solaris_OS defines the interface to Solaris operating systems

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
#define MaximumPriority 127

// FX/60 is critical thread class/priority on T4
#define FXCriticalPriority 60

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

@ -114,16 +122,13 @@ class Solaris {
static void save_preinstalled_handler(int, struct sigaction&);
static void check_signal_handler(int sig);
// For overridable signals
static int _SIGinterrupt; // user-overridable INTERRUPT_SIGNAL
static int _SIGasync; // user-overridable ASYNC_SIGNAL
static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; }
static void set_SIGasync(int newsig) { _SIGasync = newsig; }

public:
// Large Page Support--ISM.
static bool largepage_range(char* addr, size_t size);

static int SIGinterrupt() { return _SIGinterrupt; }
static int SIGasync() { return _SIGasync; }
static address handler_start, handler_end; // start and end pc of thr_sighndlrinfo


@ -191,7 +191,8 @@ int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2,
// check that all parameters to the operation
if (strlen(cmd) > AttachOperation::name_length_max) return ATTACH_ERROR_ILLEGALARG;
if (strlen(arg0) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
if (strlen(arg0) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
if (strlen(arg1) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
if (strlen(arg2) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
if (strlen(pipename) > Win32AttachOperation::pipe_name_max) return ATTACH_ERROR_ILLEGALARG;

// check for a well-formed pipename

@ -1608,6 +1608,15 @@ void os::get_summary_os_info(char* buf, size_t buflen) {
if (nl != NULL) *nl = '\0';
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
int ret = vsnprintf(buf, len, fmt, args);
// Get the correct buffer size if buf is too small
if (ret < 0) {
return _vscprintf(fmt, args);
}
return ret;
}

void os::print_os_info_brief(outputStream* st) {
os::print_os_info(st);
}
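Windows' vsnprintf historically returns a negative value when the buffer is too small, hence the _vscprintf fallback above to learn the required size. On C99 platforms the same "measure first" trick works with vsnprintf itself, as long as the va_list is copied before reuse. A portable sketch of that idea (not the HotSpot implementation):

#include <cstdarg>
#include <cstdio>

// Return the formatted length (excluding the terminator) without
// consuming the caller's va_list.
static int measure_vsnprintf(const char* fmt, va_list args) {
  va_list copy;
  va_copy(copy, args);     // a va_list cannot be reused after vsnprintf
  int needed = vsnprintf(NULL, 0, fmt, copy);  // C99: full length returned
  va_end(copy);
  return needed;
}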
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -325,8 +325,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
}
}

guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

if (sig == os::Solaris::SIGasync()) {
if (thread || vmthread) {
OSThread::SR_handler(t, uc);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -382,8 +382,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
}
}

guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

if (sig == os::Solaris::SIGasync()) {
if(thread || vmthread){
OSThread::SR_handler(t, uc);

@ -873,6 +873,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {

// Figure new capacity for each section.
csize_t new_capacity[SECT_LIMIT];
memset(new_capacity, 0, sizeof(csize_t) * SECT_LIMIT);
csize_t new_total_cap
= figure_expanded_capacities(which_cs, amount, new_capacity);


@ -92,6 +92,7 @@

// Used for backward compatibility reasons:
// - to check NameAndType_info signatures more aggressively
// - to disallow argument and require ACC_STATIC for <clinit> methods
#define JAVA_7_VERSION 51

// Extension method support.
@ -1997,9 +1998,7 @@ methodHandle ClassFileParser::parse_method(bool is_interface,
} else if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) {
flags &= JVM_ACC_STATIC | JVM_ACC_STRICT;
} else {
// As of major_version 51, a method named <clinit> without ACC_STATIC is
// just another method. So, do a normal method modifier check.
verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
classfile_parse_error("Method <clinit> is not static in class file %s", CHECK_(nullHandle));
}
} else {
verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
@ -5159,6 +5158,14 @@ int ClassFileParser::verify_legal_method_signature(Symbol* name, Symbol* signatu
return -2;
}

// Class initializers cannot have args for class format version >= 51.
if (name == vmSymbols::class_initializer_name() &&
signature != vmSymbols::void_method_signature() &&
_major_version >= JAVA_7_VERSION) {
throwIllegalSignature("Method", name, signature, CHECK_0);
return 0;
}

unsigned int args_size = 0;
char buf[fixed_buffer_size];
char* p = signature->as_utf8_flexible_buffer(THREAD, buf, fixed_buffer_size);
@ -5182,8 +5189,8 @@ int ClassFileParser::verify_legal_method_signature(Symbol* name, Symbol* signatu
// The first non-signature thing better be a ')'
if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
length--;
if (name == vmSymbols::object_initializer_name()) {
// All "<init>" methods must return void
if (name->utf8_length() > 0 && name->byte_at(0) == '<') {
// All internal methods must return void
if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
return args_size;
}
@ -26,6 +26,7 @@
#include "classfile/bytecodeAssembler.hpp"
#include "classfile/defaultMethods.hpp"
#include "classfile/symbolTable.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
@ -74,7 +75,6 @@ class PseudoScope : public ResourceObj {
}
};

#ifndef PRODUCT
static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
ResourceMark rm;
str->print("%s%s", name->as_C_string(), signature->as_C_string());
@ -87,7 +87,6 @@ static void print_method(outputStream* str, Method* mo, bool with_class=true) {
}
print_slot(str, mo->name(), mo->signature());
}
#endif // ndef PRODUCT

/**
* Perform a depth-first iteration over the class hierarchy, applying
@ -246,21 +245,22 @@ class HierarchyVisitor : StackObj {
}
};

#ifndef PRODUCT
class PrintHierarchy : public HierarchyVisitor<PrintHierarchy> {
private:
outputStream* _st;
public:

bool visit() {
InstanceKlass* cls = current_class();
streamIndentor si(tty, current_depth() * 2);
tty->indent().print_cr("%s", cls->name()->as_C_string());
streamIndentor si(_st, current_depth() * 2);
_st->indent().print_cr("%s", cls->name()->as_C_string());
return true;
}

void* new_node_data(InstanceKlass* cls) { return NULL; }
void free_node_data(void* data) { return; }

PrintHierarchy(outputStream* st = tty) : _st(st) {}
};
#endif // ndef PRODUCT

// Used to register InstanceKlass objects and all related metadata structures
// (Methods, ConstantPools) as "in-use" by the current thread so that they can't
@ -434,9 +434,11 @@ class MethodFamily : public ResourceObj {
} else if (num_defaults > 1) {
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
if (TraceDefaultMethods) {
_exception_message->print_value_on(tty);
tty->cr();
if (log_is_enabled(Debug, defaultmethods)) {
ResourceMark rm;
outputStream* logstream = LogHandle(defaultmethods)::debug_stream();
_exception_message->print_value_on(logstream);
logstream->cr();
}
}
}
@ -450,27 +452,6 @@ class MethodFamily : public ResourceObj {
return false;
}

#ifndef PRODUCT
void print_sig_on(outputStream* str, Symbol* signature, int indent) const {
streamIndentor si(str, indent * 2);

str->indent().print_cr("Logical Method %s:", signature->as_C_string());

streamIndentor si2(str);
for (int i = 0; i < _members.length(); ++i) {
str->indent();
print_method(str, _members.at(i).first);
if (_members.at(i).second == DISQUALIFIED) {
str->print(" (disqualified)");
}
str->cr();
}

if (_selected_target != NULL) {
print_selected(str, 1);
}
}

void print_selected(outputStream* str, int indent) const {
assert(has_target(), "Should be called otherwise");
streamIndentor si(str, indent * 2);
@ -478,7 +459,7 @@ class MethodFamily : public ResourceObj {
print_method(str, _selected_target);
Klass* method_holder = _selected_target->method_holder();
if (!method_holder->is_interface()) {
tty->print(" : in superclass");
str->print(" : in superclass");
}
str->cr();
}
@ -489,7 +470,6 @@ class MethodFamily : public ResourceObj {
streamIndentor si(str, indent * 2);
str->indent().print_cr("%s: %s", _exception_name->as_C_string(), _exception_message->as_C_string());
}
#endif // ndef PRODUCT
};

Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
@ -608,11 +588,9 @@ class EmptyVtableSlot : public ResourceObj {
bool is_bound() { return _binding != NULL; }
MethodFamily* get_binding() { return _binding; }

#ifndef PRODUCT
void print_on(outputStream* str) const {
print_slot(str, name(), signature());
}
#endif // ndef PRODUCT
};

static bool already_in_vtable_slots(GrowableArray<EmptyVtableSlot*>* slots, Method* m) {
@ -681,17 +659,18 @@ static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
super = super->java_super();
}

#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Slots that need filling:");
streamIndentor si(tty);
if (log_is_enabled(Debug, defaultmethods)) {
log_debug(defaultmethods)("Slots that need filling:");
ResourceMark rm;
outputStream* logstream = LogHandle(defaultmethods)::debug_stream();
streamIndentor si(logstream);
for (int i = 0; i < slots->length(); ++i) {
tty->indent();
slots->at(i)->print_on(tty);
tty->cr();
logstream->indent();
slots->at(i)->print_on(logstream);
logstream->cr();
}
}
#endif // ndef PRODUCT

return slots;
}

@ -812,46 +791,32 @@ void DefaultMethods::generate_default_methods(
KeepAliveVisitor loadKeepAlive(&keepAlive);
loadKeepAlive.run(klass);

#ifndef PRODUCT
if (TraceDefaultMethods) {
ResourceMark rm; // be careful with these!
tty->print_cr("%s %s requires default method processing",
klass->is_interface() ? "Interface" : "Class",
klass->name()->as_klass_external_name());
PrintHierarchy printer;
if (log_is_enabled(Debug, defaultmethods)) {
ResourceMark rm;
log_debug(defaultmethods)("%s %s requires default method processing",
klass->is_interface() ? "Interface" : "Class",
klass->name()->as_klass_external_name());
PrintHierarchy printer(LogHandle(defaultmethods)::debug_stream());
printer.run(klass);
}
#endif // ndef PRODUCT

GrowableArray<EmptyVtableSlot*>* empty_slots =
find_empty_vtable_slots(klass, mirandas, CHECK);

for (int i = 0; i < empty_slots->length(); ++i) {
EmptyVtableSlot* slot = empty_slots->at(i);
#ifndef PRODUCT
if (TraceDefaultMethods) {
streamIndentor si(tty, 2);
tty->indent().print("Looking for default methods for slot ");
slot->print_on(tty);
tty->cr();
if (log_is_enabled(Debug, defaultmethods)) {
outputStream* logstream = LogHandle(defaultmethods)::debug_stream();
streamIndentor si(logstream, 2);
logstream->indent().print("Looking for default methods for slot ");
slot->print_on(logstream);
logstream->cr();
}
#endif // ndef PRODUCT

generate_erased_defaults(klass, empty_slots, slot, CHECK);
}
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Creating defaults and overpasses...");
}
#endif // ndef PRODUCT

log_debug(defaultmethods)("Creating defaults and overpasses...");
create_defaults_and_exceptions(empty_slots, klass, CHECK);

#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Default method processing complete");
}
#endif // ndef PRODUCT
log_debug(defaultmethods)("Default method processing complete");
}

static int assemble_method_error(
@ -947,18 +912,18 @@ static void create_defaults_and_exceptions(
MethodFamily* method = slot->get_binding();
BytecodeBuffer buffer;

#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print("for slot: ");
slot->print_on(tty);
tty->cr();
if (log_is_enabled(Debug, defaultmethods)) {
ResourceMark rm;
outputStream* logstream = LogHandle(defaultmethods)::debug_stream();
logstream->print("for slot: ");
slot->print_on(logstream);
logstream->cr();
if (method->has_target()) {
method->print_selected(tty, 1);
method->print_selected(logstream, 1);
} else if (method->throws_exception()) {
method->print_exception(tty, 1);
method->print_exception(logstream, 1);
}
}
#endif // ndef PRODUCT

if (method->has_target()) {
Method* selected = method->get_selected_target();
@ -982,12 +947,9 @@ static void create_defaults_and_exceptions(
}
}

#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Created %d overpass methods", overpasses.length());
tty->print_cr("Created %d default methods", defaults.length());
}
#endif // ndef PRODUCT

log_debug(defaultmethods)("Created %d overpass methods", overpasses.length());
log_debug(defaultmethods)("Created %d default methods", defaults.length());

if (overpasses.length() > 0) {
switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
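All the defaultmethods changes above follow one mechanical pattern: guard with log_is_enabled(Debug, defaultmethods), fetch the tag's debug stream once, and print there instead of to tty. A miniature imitation of that shape in plain C++; the real LogHandle/log_debug machinery is HotSpot's Unified Logging and is not reproduced here:

#include <cstdio>

static bool log_debug_enabled = true;   // set from -Xlog in the real VM

// Check the level first (cheap), then do the formatting work against
// the tag's stream -- the same shape as the hunks above.
static void log_slots_that_need_filling(int count) {
  if (!log_debug_enabled) {
    return;                             // mirrors log_is_enabled(...)
  }
  FILE* stream = stderr;                // mirrors ...::debug_stream()
  fprintf(stream, "Slots that need filling:\n");
  for (int i = 0; i < count; i++) {
    fprintf(stream, "  slot %d\n", i);
  }
}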
@ -2846,7 +2846,7 @@ void ClassVerifier::verify_invoke_instructions(
if (sig_stream.type() != T_VOID) {
if (method_name == vmSymbols::object_initializer_name()) {
// <init> method must have a void return type
/* Unreachable? Class file parser verifies that <init> methods have
/* Unreachable? Class file parser verifies that methods with '<' have
* void return */
verify_error(ErrorContext::bad_code(bci),
"Return type must be void in <init> method");

@ -674,10 +674,6 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
return nm;
}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4355) // warning C4355: 'this' : used in base member initializer list
#endif
// For native wrappers
nmethod::nmethod(
Method* method,
@ -767,10 +763,6 @@ nmethod::nmethod(
}
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
@ -2303,7 +2295,7 @@ void nmethod::oops_do_marking_epilogue() {
assert(cur != NULL, "not NULL-terminated");
nmethod* next = cur->_oops_do_mark_link;
cur->_oops_do_mark_link = NULL;
cur->verify_oop_relocations();
DEBUG_ONLY(cur->verify_oop_relocations());
NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));
cur = next;
}

@ -438,10 +438,10 @@ void Relocation::const_set_data_value(address x) {
void Relocation::const_verify_data_value(address x) {
#ifdef _LP64
if (format() == relocInfo::narrow_oop_in_const) {
assert(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
} else {
#endif
assert(*(address*)addr() == x, "must agree");
guarantee(*(address*)addr() == x, "must agree");
#ifdef _LP64
}
#endif
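The relocInfo hunk upgrades assert to guarantee because assert compiles to nothing in product builds while guarantee is checked in every build flavor. The two macro shapes, reduced to their essentials (HotSpot's real versions also report file, line, and VM state):

#include <cstdio>
#include <cstdlib>

// Checked in every build flavor.
#define GUARANTEE(cond, msg)                                       \
  do {                                                             \
    if (!(cond)) {                                                 \
      fprintf(stderr, "guarantee(%s) failed: %s\n", #cond, msg);   \
      abort();                                                     \
    }                                                              \
  } while (0)

// Compiled away entirely in product (NDEBUG) builds.
#ifdef NDEBUG
#define ASSERT(cond, msg) ((void)0)
#else
#define ASSERT(cond, msg) GUARANTEE(cond, msg)
#endif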
@ -1542,9 +1542,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
do_compaction_work(clear_all_soft_refs);

// Has the GC time limit been exceeded?
size_t max_eden_size = _young_gen->max_capacity() -
_young_gen->to()->capacity() -
_young_gen->from()->capacity();
size_t max_eden_size = _young_gen->max_eden_size();
GCCause::Cause gc_cause = gch->gc_cause();
size_policy()->check_gc_overhead_limit(_young_gen->used(),
_young_gen->eden()->used(),
@ -7350,6 +7348,14 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,

set_freeFinger(freeFinger);
set_freeRangeInFreeLists(freeRangeInFreeLists);
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
FreeChunk* fc = (FreeChunk*) freeFinger;
assert(fc->is_free(), "A chunk on the free list should be free.");
assert(fc->size() > 0, "Free range should have a size");
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
}
}
}

// Note that the sweeper runs concurrently with mutators. Thus,
@ -7502,7 +7508,12 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {

void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
const size_t size = fc->size();

// Chunks that cannot be coalesced are not in the
// free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) {
assert(_sp->verify_chunk_in_free_list(fc),
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been
// marked in the bit map
HeapWord* const addr = (HeapWord*) fc;
@ -7609,6 +7620,9 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
// of the adaptive free list allocator.
const bool fcInFreeLists = fc->is_free();
assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
}

if (CMSTraceSweeper) {
gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
@ -7660,7 +7674,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
if (freeRangeInFreeLists()) {
FreeChunk* const ffc = (FreeChunk*)freeFinger();
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verify_chunk_in_free_list(ffc),
"Chunk is not in free lists");
}
_sp->coalDeath(ffc->size());
_sp->removeFreeChunkFromFreeLists(ffc);
set_freeRangeInFreeLists(false);
@ -7729,6 +7747,12 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
assert(size > 0,
"A zero sized chunk cannot be added to the free lists.");
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk;
fc->set_size(size);
assert(!_sp->verify_chunk_in_free_list(fc),
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {
gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
p2i(chunk), size);
@ -57,10 +57,6 @@
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( push )
|
||||
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
|
||||
#endif
|
||||
ParScanThreadState::ParScanThreadState(Space* to_space_,
|
||||
ParNewGeneration* young_gen_,
|
||||
Generation* old_gen_,
|
||||
@ -104,9 +100,6 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
|
||||
_old_gen_closure.set_generation(old_gen_);
|
||||
_old_gen_root_closure.set_generation(old_gen_);
|
||||
}
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( pop )
|
||||
#endif
|
||||
|
||||
void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
|
||||
size_t plab_word_size) {
|
||||
@ -597,10 +590,6 @@ void ParNewGenTask::work(uint worker_id) {
|
||||
par_scan_state.evacuate_followers_closure().do_void();
|
||||
}
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( push )
|
||||
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
|
||||
#endif
|
||||
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
|
||||
: DefNewGeneration(rs, initial_byte_size, "PCopy"),
|
||||
_overflow_list(NULL),
|
||||
@ -643,9 +632,6 @@ ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
|
||||
ParallelGCThreads, CHECK);
|
||||
}
|
||||
}
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( pop )
|
||||
#endif
|
||||
|
||||
// ParNewGeneration::
|
||||
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
|
||||
|
File diff suppressed because it is too large
@ -244,30 +244,6 @@ public:
bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement) \
do { \
statement ; \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement) \
do { \
} while (0)
#endif // _MARKING_STATS_

typedef enum {
no_verbose = 0, // verbose turned off
stats_verbose, // only prints stats at the end of marking
low_verbose, // low verbose, mostly per region and per major event
medium_verbose, // a bit more detailed than low
high_verbose // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
@ -415,9 +391,6 @@ protected:
// time of remark.
volatile bool _concurrent_marking_in_progress;

// Verbose level
CMVerboseLevel _verbose_level;

// All of these times are in ms
NumberSeq _init_times;
NumberSeq _remark_times;
@ -746,31 +719,12 @@ public:

bool has_aborted() { return _has_aborted; }

// This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();)

void print_summary_info();

void print_worker_threads_on(outputStream* st) const;

void print_on_error(outputStream* st) const;

// The following indicate whether a given verbose level has been
// set. Notice that anything above stats is conditional to
// _MARKING_VERBOSE_ having been set to 1
bool verbose_stats() {
return _verbose_level >= stats_verbose;
}
bool verbose_low() {
return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
}
bool verbose_medium() {
return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
}
bool verbose_high() {
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
}

// Liveness counting

// Utility routine to set an exclusive range of cards on the given
@ -818,16 +772,13 @@ public:
size_t* marked_bytes_array,
BitMap* task_card_bm);

// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

// Counts the given object in the given task/worker counting
// data structures.
inline void count_object(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
BitMap* task_card_bm,
size_t word_size);

// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
@ -969,43 +920,6 @@ private:
size_t* _marked_bytes_array;
BitMap* _card_bm;

// LOTS of statistics related with this task
#if _MARKING_STATS_
NumberSeq _all_clock_intervals_ms;
double _interval_start_time_ms;

size_t _aborted;
size_t _aborted_overflow;
size_t _aborted_cm_aborted;
size_t _aborted_yield;
size_t _aborted_timed_out;
size_t _aborted_satb;
size_t _aborted_termination;

size_t _steal_attempts;
size_t _steals;

size_t _clock_due_to_marking;
size_t _clock_due_to_scanning;

size_t _local_pushes;
size_t _local_pops;
size_t _local_max_size;
size_t _objs_scanned;

size_t _global_pushes;
size_t _global_pops;
size_t _global_max_size;

size_t _global_transfers_to;
size_t _global_transfers_from;

size_t _regions_claimed;
size_t _objs_found_on_bitmap;

size_t _satb_buffers_processed;
#endif // _MARKING_STATS_

// it updates the local fields after this task has claimed
// a new region to scan
void setup_for_region(HeapRegion* hr);
@ -1139,10 +1053,6 @@ public:

// it prints statistics associated with this task
void print_stats();

#if _MARKING_STATS_
void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness

@ -89,9 +89,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
size_t region_size_bytes = mr.byte_size();
uint index = hr->hrm_index();

assert(!hr->is_continues_humongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity");
assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
assert(marked_bytes_array != NULL, "pre-condition");
assert(task_card_bm != NULL, "pre-condition");

@ -116,23 +114,23 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}

// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr,
HeapRegion* hr,
uint worker_id) {
size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
BitMap* task_card_bm = count_card_bitmap_for(worker_id);
count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
MemRegion mr((HeapWord*)obj, obj->size());
count_region(mr, hr, marked_bytes_array, task_card_bm);
BitMap* task_card_bm,
size_t word_size) {
assert(!hr->is_continues_humongous(), "Cannot enter count_object with continues humongous");
if (!hr->is_starts_humongous()) {
MemRegion mr((HeapWord*)obj, word_size);
count_region(mr, hr, marked_bytes_array, task_card_bm);
} else {
do {
MemRegion mr(hr->bottom(), hr->top());
count_region(mr, hr, marked_bytes_array, task_card_bm);
hr = _g1h->next_region_in_humongous(hr);
} while (hr != NULL);
}
}

// Attempts to mark the given object and, if successful, counts
@ -141,10 +139,9 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
HeapWord* addr = (HeapWord*)obj;
if (_nextMarkBitMap->parMark(addr)) {
if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
// Update the task specific count data for the object.
count_object(obj, hr, marked_bytes_array, task_card_bm);
count_object(obj, hr, marked_bytes_array, task_card_bm, obj->size());
return true;
}
return false;
@ -157,10 +154,10 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
size_t word_size,
HeapRegion* hr,
uint worker_id) {
HeapWord* addr = (HeapWord*)obj;
if (_nextMarkBitMap->parMark(addr)) {
MemRegion mr(addr, word_size);
count_region(mr, hr, worker_id);
if (_nextMarkBitMap->parMark((HeapWord*)obj)) {
size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
BitMap* task_card_bm = count_card_bitmap_for(worker_id);
count_object(obj, hr, marked_bytes_array, task_card_bm, word_size);
return true;
}
return false;
@ -242,19 +239,9 @@ inline void CMTask::push(oop obj) {
assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] pushing " PTR_FORMAT, _worker_id, p2i((void*) obj));
}

if (!_task_queue->push(obj)) {
// The local task queue looks full. We need to push some entries
// to the global stack.

if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] task queue overflow, "
"moving entries to the global stack",
_worker_id);
}
move_entries_to_global_stack();

// this should succeed since, even if we overflow the global
@ -263,12 +250,6 @@ inline void CMTask::push(oop obj) {
bool success = _task_queue->push(obj);
assert(success, "invariant");
}

statsOnly( size_t tmp_size = (size_t)_task_queue->size();
if (tmp_size > _local_max_size) {
_local_max_size = tmp_size;
}
++_local_pushes );
}

inline bool CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
@ -306,18 +287,12 @@ inline void CMTask::process_grey_object(oop obj) {
assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
_worker_id, p2i((void*) obj));
}

size_t obj_size = obj->size();
_words_scanned += obj_size;

if (scan) {
obj->oop_iterate(_cm_oop_closure);
}
statsOnly( ++_objs_scanned );
check_limits();
}

@ -325,12 +300,6 @@ inline void CMTask::process_grey_object(oop obj) {

inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {

if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] marked object " PTR_FORMAT,
_worker_id, p2i(obj));
}

// No OrderAccess:store_load() is needed. It is implicit in the
// CAS done in CMBitMap::parMark() call in the routine above.
HeapWord* global_finger = _cm->finger();
@ -362,13 +331,6 @@ inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
// references, and the metadata is built-in.
process_grey_object<false>(obj);
} else {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
", global: " PTR_FORMAT ") pushing "
PTR_FORMAT " on mark stack",
_worker_id, p2i(_finger),
p2i(global_finger), p2i(obj));
}
push(obj);
}
}
@ -376,11 +338,6 @@ inline void CMTask::make_reference_grey(oop obj, HeapRegion* hr) {
}

inline void CMTask::deal_with_reference(oop obj) {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're dealing with reference = " PTR_FORMAT,
_worker_id, p2i((void*) obj));
}

increment_refs_reached();

HeapWord* objAddr = (HeapWord*) obj;
@ -391,7 +348,7 @@ inline void CMTask::deal_with_reference(oop obj) {
// Only get the containing region if the object is not marked on the
// bitmap (otherwise, it's a waste of time since we won't do
// anything with it).
HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
HeapRegion* hr = _g1h->heap_region_containing(obj);
if (!hr->obj_allocated_since_next_marking(obj)) {
make_reference_grey(obj, hr);
}
@ -411,7 +368,7 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
assert(obj != NULL, "pre-condition");
HeapWord* addr = (HeapWord*) obj;
if (hr == NULL) {
hr = _g1h->heap_region_containing_raw(addr);
hr = _g1h->heap_region_containing(addr);
} else {
assert(hr->is_in(addr), "pre-condition");
}
@ -420,16 +377,6 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
// header it's impossible to get back a HC region.
assert(!hr->is_continues_humongous(), "sanity");

// We cannot assert that word_size == obj->size() given that obj
// might not be in a consistent state (another thread might be in
// the process of copying it). So the best thing we can do is to
// assert that word_size is under an upper bound which is its
// containing region's capacity.
assert(word_size * HeapWordSize <= hr->capacity(),
"size: " SIZE_FORMAT " capacity: " SIZE_FORMAT " " HR_FORMAT,
word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr));

if (addr < hr->next_top_at_mark_start()) {
if (!_nextMarkBitMap->isMarked(addr)) {
par_mark_and_count(obj, word_size, hr, worker_id);

@ -32,6 +32,18 @@
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"

DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
PtrQueue(qset, permanent, true /* active */)
{ }

DirtyCardQueue::~DirtyCardQueue() {
if (!is_permanent()) {
flush();
}
}

bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
@ -40,7 +52,9 @@ bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
res = apply_closure_to_buffer(cl, _buf, _index, _sz,
consume,
worker_i);
if (res && consume) _index = _sz;
if (res && consume) {
_index = _sz;
}
}
return res;
}
@ -51,27 +65,27 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
for (size_t i = index; i < sz; i += oopSize) {
int ind = byte_index_to_index((int)i);
jbyte* card_ptr = (jbyte*)buf[ind];
size_t limit = byte_index_to_index(sz);
for (size_t i = byte_index_to_index(index); i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
if (card_ptr != NULL) {
// Set the entry to null, so we don't do it again (via the test
// above) if we reconsider this buffer.
if (consume) buf[ind] = NULL;
if (!cl->do_card_ptr(card_ptr, worker_i)) return false;
if (consume) {
buf[i] = NULL;
}
if (!cl->do_card_ptr(card_ptr, worker_i)) {
return false;
}
}
}
return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
PtrQueueSet(notify_when_complete),
_mut_process_closure(NULL),
_shared_dirty_card_queue(this, true /*perm*/),
_shared_dirty_card_queue(this, true /* permanent */),
_free_ids(NULL),
_processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
@ -83,13 +97,19 @@ uint DirtyCardQueueSet::num_par_ids() {
return (uint)os::processor_count();
}

void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner) {
Mutex* lock,
DirtyCardQueueSet* fl_owner) {
_mut_process_closure = cl;
PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
max_completed_queue, fl_owner);
PtrQueueSet::initialize(cbl_mon,
fl_lock,
process_completed_threshold,
max_completed_queue,
fl_owner);
set_buffer_size(G1UpdateBufferSize);
_shared_dirty_card_queue.set_lock(lock);
_free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
@ -103,7 +123,7 @@ void DirtyCardQueueSet::iterate_closure_all_threads(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
for(JavaThread* t = Threads::first(); t; t = t->next()) {
for (JavaThread* t = Threads::first(); t; t = t->next()) {
bool b = t->dirty_card_queue().apply_closure(cl, consume);
guarantee(b, "Should not be interrupted.");
}
@ -160,8 +180,7 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
}


BufferNode*
DirtyCardQueueSet::get_completed_buffer(int stop_at) {
BufferNode* DirtyCardQueueSet::get_completed_buffer(int stop_at) {
BufferNode* nd = NULL;
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

@ -178,14 +197,13 @@ DirtyCardQueueSet::get_completed_buffer(int stop_at) {
_n_completed_buffers--;
assert(_n_completed_buffers >= 0, "Invariant");
}
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
return nd;
}

bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
uint worker_i,
BufferNode* nd) {
bool DirtyCardQueueSet::apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
uint worker_i,
BufferNode* nd) {
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
size_t index = nd->index();
@ -259,7 +277,7 @@ void DirtyCardQueueSet::clear() {
}
_n_completed_buffers = 0;
_completed_buffers_tail = NULL;
debug_only(assert_completed_buffer_list_len_correct_locked());
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
BufferNode* nd = buffers_to_delete;
@ -291,10 +309,11 @@ void DirtyCardQueueSet::concatenate_logs() {
for (JavaThread* t = Threads::first(); t; t = t->next()) {
DirtyCardQueue& dcq = t->dirty_card_queue();
if (dcq.size() != 0) {
void **buf = t->dirty_card_queue().get_buf();
void** buf = dcq.get_buf();
// We must NULL out the unused entries, then enqueue.
for (size_t i = 0; i < t->dirty_card_queue().get_index(); i += oopSize) {
buf[PtrQueue::byte_index_to_index((int)i)] = NULL;
size_t limit = dcq.byte_index_to_index(dcq.get_index());
for (size_t i = 0; i < limit; ++i) {
buf[i] = NULL;
}
enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
dcq.reinitialize();

@ -29,6 +29,7 @@
#include "memory/allocation.hpp"

class FreeIdSet;
class DirtyCardQueueSet;

// A closure class for processing card table entries. Note that we don't
// require these closure objects to be stack-allocated.
@ -42,14 +43,11 @@ public:
// A ptrQueue whose elements are "oops", pointers to object heads.
class DirtyCardQueue: public PtrQueue {
public:
DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
PtrQueue(qset_, perm, true /* active */) { }
DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent = false);

// Flush before destroying; queue may be used to capture pending work while
// doing something else, with auto-flush on completion.
~DirtyCardQueue() { if (!is_permanent()) flush(); }
~DirtyCardQueue();

// Process queue entries and release resources.
void flush() { flush_impl(); }
@ -72,7 +70,6 @@ public:
bool consume = true,
uint worker_i = 0);
void **get_buf() { return _buf;}
void set_buf(void **buf) {_buf = buf;}
size_t get_index() { return _index;}
void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
};
@ -101,10 +98,13 @@ class DirtyCardQueueSet: public PtrQueueSet {
public:
DirtyCardQueueSet(bool notify_when_complete = true);

void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
void initialize(CardTableEntryClosure* cl,
Monitor* cbl_mon,
Mutex* fl_lock,
int process_completed_threshold,
int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner = NULL);
Mutex* lock,
DirtyCardQueueSet* fl_owner = NULL);

// The number of parallel ids that can be claimed to allow collector or
// mutator threads to do card-processing work.

@ -110,9 +110,6 @@ void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_inf
if (_retained_old_gc_alloc_region != NULL) {
_retained_old_gc_alloc_region->record_retained_region();
}

_g1h->alloc_buffer_stats(InCSetState::Young)->adjust_desired_plab_sz();
_g1h->alloc_buffer_stats(InCSetState::Old)->adjust_desired_plab_sz();
}

void G1DefaultAllocator::abandon_gc_alloc_regions() {

@ -499,18 +499,14 @@ HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
return _next_offset_threshold;
}

void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
assert(new_top <= _end, "_end should have already been updated");

void G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* obj_top) {
// The first BOT entry should have offset 0.
reset_bot();
alloc_block(_bottom, new_top);
alloc_block(_bottom, obj_top);
}

#ifndef PRODUCT
void
G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
void G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
G1BlockOffsetArray::print_on(out);
out->print_cr(" next offset threshold: " PTR_FORMAT, p2i(_next_offset_threshold));
out->print_cr(" next offset index: " SIZE_FORMAT, _next_offset_index);

@ -361,17 +361,18 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
// implementation, that's true because NULL is represented as 0, and thus
// never exceeds the "_next_offset_threshold".
void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
if (blk_end > _next_offset_threshold)
if (blk_end > _next_offset_threshold) {
alloc_block_work1(blk_start, blk_end);
}
}
void alloc_block(HeapWord* blk, size_t size) {
alloc_block(blk, blk+size);
alloc_block(blk, blk+size);
}

HeapWord* block_start_unsafe(const void* addr);
HeapWord* block_start_unsafe_const(const void* addr) const;

void set_for_starts_humongous(HeapWord* new_top);
void set_for_starts_humongous(HeapWord* obj_top);

virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

@ -123,7 +123,6 @@ G1BlockOffsetArray::block_at_or_preceding(const void* addr,
// to go back by.
size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
q -= (N_words * n_cards_back);
assert(q >= gsp()->bottom(), "Went below bottom!");
index -= n_cards_back;
offset = _array->offset_array(index);
}

@ -36,7 +36,7 @@ void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
HeapRegion* hr = _g1h->heap_region_containing(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}

@ -320,12 +320,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
HeapWord* new_end = new_obj + word_size_sum;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord* new_top = new_obj + word_size;
// This will be the new top of the new object.
HeapWord* obj_top = new_obj + word_size;

// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
@ -346,7 +342,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_starts_humongous(new_top, new_end);
first_hr->set_starts_humongous(obj_top);
first_hr->set_allocation_context(context);
// Then, if there are any, we will set up the "continues
// humongous" regions.
@ -356,9 +352,6 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
hr->set_continues_humongous(first_hr);
hr->set_allocation_context(context);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert(hr == NULL || hr->end() == new_end, "sanity");

// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
@ -371,58 +364,39 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,

// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
"new_top should be in this region");
first_hr->set_top(new_top);
first_hr->set_top(MIN2(first_hr->end(), obj_top));
if (_hr_printer.is_active()) {
HeapWord* bottom = first_hr->bottom();
HeapWord* end = first_hr->orig_end();
if ((first + 1) == last) {
// the series has a single humongous region
_hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
} else {
// the series has more than one humongous regions
_hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
}
_hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, first_hr->top());
}

// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !is_continues_humongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
// regions.
hr = NULL;
for (uint i = first + 1; i < last; ++i) {
hr = region_at(i);
if ((i + 1) == last) {
// last continues humongous region
assert(hr->bottom() < new_top && new_top <= hr->end(),
assert(hr->bottom() < obj_top && obj_top <= hr->end(),
"new_top should fall on this region");
hr->set_top(new_top);
_hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
hr->set_top(obj_top);
_hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, obj_top);
} else {
// not last one
assert(new_top > hr->end(), "new_top should be above this region");
assert(obj_top > hr->end(), "obj_top should be above this region");
hr->set_top(hr->end());
_hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert(hr == NULL ||
(hr->end() == new_end && hr->top() == new_top), "sanity");
// If we have continues humongous regions (hr != NULL), its top should
// match obj_top.
assert(hr == NULL || (hr->top() == obj_top), "sanity");
check_bitmaps("Humongous Region Allocation", first_hr);

assert(first_hr->used() == word_size * HeapWordSize, "invariant");
increase_used(first_hr->used());
_humongous_set.add(first_hr);
increase_used(word_size * HeapWordSize);

for (uint i = first; i < last; ++i) {
_humongous_set.add(region_at(i));
}

return new_obj;
}
@ -1139,15 +1113,15 @@ public:
bool doHeapRegion(HeapRegion* r) {
HeapRegionRemSet* hrrs = r->rem_set();

_g1h->reset_gc_time_stamps(r);

if (r->is_continues_humongous()) {
// We'll assert that the strong code root list and RSet is empty
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
assert(hrrs->occupied() == 0, "RSet should be empty");
return false;
} else {
hrrs->clear();
}

_g1h->reset_gc_time_stamps(r);
hrrs->clear();
// You might think here that we could clear just the cards
// corresponding to the used region. But no: if we leave a dirty card
// in a region we might allocate into, then it would prevent that card
@ -1205,12 +1179,7 @@ public:
if (hr->is_free()) {
// We only generate output for non-empty regions.
} else if (hr->is_starts_humongous()) {
if (hr->region_num() == 1) {
// single humongous region
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
} else {
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
}
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
} else if (hr->is_continues_humongous()) {
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
} else if (hr->is_archive()) {
@ -1807,16 +1776,10 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {

// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER


G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
CollectedHeap(),
_g1_policy(policy_),
_dirty_card_queue_set(false),
_into_cset_dirty_card_queue_set(false),
_is_alive_closure_cm(this),
_is_alive_closure_stw(this),
_ref_processor_cm(NULL),
@ -2081,16 +2044,6 @@ jint G1CollectedHeap::initialize() {
Shared_DirtyCardQ_lock,
&JavaThread::dirty_card_queue_set());

// Initialize the card queue set used to hold cards containing
// references into the collection set.
_into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
-1, // never trigger processing
-1, // no limit on length
Shared_DirtyCardQ_lock,
&JavaThread::dirty_card_queue_set());

// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
HeapRegion* dummy_region = _hrm.get_dummy_region();
@ -2222,17 +2175,7 @@ size_t G1CollectedHeap::capacity() const {
}

void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
assert(!hr->is_continues_humongous(), "pre-condition");
hr->reset_gc_time_stamp();
if (hr->is_starts_humongous()) {
uint first_index = hr->hrm_index() + 1;
uint last_index = hr->last_hc_index();
for (uint i = first_index; i < last_index; i += 1) {
HeapRegion* chr = region_at(i);
assert(chr->is_continues_humongous(), "sanity");
chr->reset_gc_time_stamp();
}
}
}

#ifndef PRODUCT
@ -2300,9 +2243,7 @@ class SumUsedClosure: public HeapRegionClosure {
public:
SumUsedClosure() : _used(0) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->is_continues_humongous()) {
_used += r->used();
}
_used += r->used();
return false;
}
size_t result() { return _used; }
@ -2523,9 +2464,9 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
bool G1CollectedHeap::is_in(const void* p) const {
if (_hrm.reserved().contains(p)) {
// Given that we know that p is in the reserved space,
// heap_region_containing_raw() should successfully
// heap_region_containing() should successfully
// return the containing region.
HeapRegion* hr = heap_region_containing_raw(p);
HeapRegion* hr = heap_region_containing(p);
return hr->is_in(p);
} else {
return false;
@ -3062,7 +3003,7 @@ public:
r->verify(_vo, &failures);
if (failures) {
_failures = true;
} else {
} else if (!r->is_starts_humongous()) {
VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
r->object_iterate(&not_dead_yet_cl);
if (_vo != VerifyOption_G1UseNextMarking) {
@ -3613,7 +3554,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
// The remembered set might contain references to already freed
// regions. Filter out such entries to avoid failing card table
// verification.
if (!g1h->heap_region_containing(bs->addr_for(card_ptr))->is_free()) {
if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
*card_ptr = CardTableModRefBS::dirty_card_val();
_dcq.enqueue(card_ptr);
@ -3735,8 +3676,7 @@ void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
gclog_or_tty->print(" (to-space exhausted)");
}
gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
g1_policy()->phase_times()->note_gc_end();
g1_policy()->phase_times()->print(pause_time_sec);
g1_policy()->print_phases(pause_time_sec);
g1_policy()->print_detailed_heap_transition();
} else {
if (evacuation_failed()) {
@ -3827,7 +3767,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
workers()->set_active_workers(active_workers);

double pause_start_sec = os::elapsedTime();
g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress());
g1_policy()->note_gc_start(active_workers);
log_gc_header();

TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@ -5270,6 +5210,9 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in

record_obj_copy_mem_stats();

_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();

// Reset and re-enable the hot card cache.
// Note the counts for the cards in the regions in the
// collection set are reset when the collection set is freed.
@ -5315,30 +5258,16 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
}

void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par) {
assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
FreeRegionList* free_list,
bool par) {
assert(hr->is_humongous(), "this is only for humongous regions");
assert(free_list != NULL, "pre-condition");

size_t hr_capacity = hr->capacity();
// We need to read this before we make the region non-humongous,
// otherwise the information will be gone.
uint last_index = hr->last_hc_index();
hr->clear_humongous();
free_region(hr, free_list, par);

uint i = hr->hrm_index() + 1;
while (i < last_index) {
HeapRegion* curr_hr = region_at(i);
assert(curr_hr->is_continues_humongous(), "invariant");
curr_hr->clear_humongous();
free_region(curr_hr, free_list, par);
i += 1;
}
}

void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
const HeapRegionSetCount& humongous_regions_removed) {
const HeapRegionSetCount& humongous_regions_removed) {
if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_old_set.bulk_remove(old_regions_removed);
@ -5498,8 +5427,6 @@ public:
bool failures() { return _failures; }

virtual bool doHeapRegion(HeapRegion* hr) {
if (hr->is_continues_humongous()) return false;

bool result = _g1h->verify_bitmaps(_caller, hr);
if (!result) {
_failures = true;
@ -5773,11 +5700,10 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
!r->rem_set()->is_empty()) {

if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
region_idx,
(size_t)obj->size() * HeapWordSize,
p2i(r->bottom()),
r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
@ -5794,11 +5720,10 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
PTR_FORMAT " is not.", p2i(r->bottom()));

if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
region_idx,
(size_t)obj->size() * HeapWordSize,
p2i(r->bottom()),
r->region_num(),
r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()),
@ -5810,10 +5735,14 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
if (next_bitmap->isMarked(r->bottom())) {
next_bitmap->clear(r->bottom());
}
_freed_bytes += r->used();
r->set_containing_set(NULL);
_humongous_regions_removed.increment(1u, r->capacity());
g1h->free_humongous_region(r, _free_region_list, false);
do {
HeapRegion* next = g1h->next_region_in_humongous(r);
_freed_bytes += r->used();
r->set_containing_set(NULL);
_humongous_regions_removed.increment(1u, r->capacity());
g1h->free_humongous_region(r, _free_region_list, false);
r = next;
} while (r != NULL);

return false;
}
@ -6048,10 +5977,6 @@ public:
}

bool doHeapRegion(HeapRegion* r) {
if (r->is_continues_humongous()) {
return false;
}

if (r->is_empty()) {
// Add free regions to the free list
r->set_free();
@ -6239,14 +6164,10 @@ public:
_old_count(), _humongous_count(), _free_count(){ }

bool doHeapRegion(HeapRegion* hr) {
if (hr->is_continues_humongous()) {
return false;
}

if (hr->is_young()) {
// TODO
} else if (hr->is_starts_humongous()) {
assert(hr->containing_set() == _humongous_set, "Heap region %u is starts humongous but not in humongous set.", hr->hrm_index());
} else if (hr->is_humongous()) {
assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());

@ -757,12 +757,6 @@ protected:
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;

// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection
// set in the event of an evacuation failure.
DirtyCardQueueSet _into_cset_dirty_card_queue_set;

// After a collection pause, make the regions in the CS into free
// regions.
void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
@ -952,13 +946,6 @@ public:
// A set of cards where updates happened during the GC
DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection
// set in the event of an evacuation failure.
DirtyCardQueueSet& into_cset_dirty_card_queue_set()
{ return _into_cset_dirty_card_queue_set; }

// Create a G1CollectedHeap with the specified policy.
// Must call the initialize method afterwards.
// May not return if something goes wrong.
@ -1178,7 +1165,6 @@ public:
void prepend_to_freelist(FreeRegionList* list);
void decrement_summary_bytes(size_t bytes);

// Returns "TRUE" iff "p" points into the committed areas of the heap.
virtual bool is_in(const void* p) const;
#ifdef ASSERT
// Returns whether p is in one of the available areas of the heap. Slow but
@ -1243,6 +1229,10 @@ public:
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* region_at(uint index) const;

// Return the next region (by index) that is part of the same
// humongous object that hr is part of.
inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

// Calculate the region index of the given address. Given address must be
// within the heap.
inline uint addr_to_region(HeapWord* addr) const;
@ -1280,11 +1270,6 @@ public:

// Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;

// Returns the HeapRegion that contains addr. addr must not be NULL.
// If addr is within a humongous continues region, it returns its humongous start region.
template <class T>
inline HeapRegion* heap_region_containing(const T addr) const;

// A CollectedHeap is divided into a dense sequence of "blocks"; that is,

@ -65,6 +65,10 @@ inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
return _hrm.next_region_in_humongous(hr);
}

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
"Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
@ -77,7 +81,7 @@ inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
assert(addr != NULL, "invariant");
assert(is_in_g1_reserved((const void*) addr),
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
@ -85,15 +89,6 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = heap_region_containing_raw(addr);
if (hr->is_continues_humongous()) {
return hr->humongous_start_region();
}
return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
_gc_time_stamp = 0;
OrderAccess::fence();
@ -124,9 +119,9 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert_heap_not_locked();

// Assign the containing region to containing_hr so that we don't
// have to keep calling heap_region_containing_raw() in the
// have to keep calling heap_region_containing() in the
// asserts below.
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
assert(word_size > 0, "pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");

@ -437,6 +437,10 @@ void G1CollectorPolicy::init() {
start_incremental_cset_building();
}

void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
phase_times()->note_gc_start(num_active_workers);
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
_gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
@ -807,7 +811,7 @@ void G1CollectorPolicy::record_full_collection_end() {
// transitions and make sure we start with young GCs after the Full GC.
collector_state()->set_gcs_are_young(true);
collector_state()->set_last_young_gc(false);
collector_state()->set_initiate_conc_mark_if_possible(false);
collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
collector_state()->set_during_initial_mark_pause(false);
collector_state()->set_in_marking_window(false);
collector_state()->set_in_marking_window_im(false);
@ -888,7 +892,9 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
collector_state()->set_last_young_gc(true);
bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
"skip last young-only gc");
collector_state()->set_last_young_gc(should_continue_with_reclaim);
collector_state()->set_in_marking_window(false);
}

@ -903,8 +909,35 @@ double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) con
return phase_times()->average_time_ms(phase);
}

double G1CollectorPolicy::young_other_time_ms() const {
return phase_times()->young_cset_choice_time_ms() +
phase_times()->young_free_cset_time_ms();
}

double G1CollectorPolicy::non_young_other_time_ms() const {
return phase_times()->non_young_cset_choice_time_ms() +
phase_times()->non_young_free_cset_time_ms();

}

double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
return pause_time_ms -
average_time_ms(G1GCPhaseTimes::UpdateRS) -
average_time_ms(G1GCPhaseTimes::ScanRS) -
average_time_ms(G1GCPhaseTimes::ObjCopy) -
average_time_ms(G1GCPhaseTimes::Termination);
}

double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}

bool G1CollectorPolicy::about_to_start_mixed_phase() const {
return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
if (about_to_start_mixed_phase()) {
return false;
}

@ -972,11 +1005,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
} else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
collector_state()->set_initiate_conc_mark_if_possible(true);
} else {
maybe_start_marking();
}

_mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
@ -1010,19 +1040,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
if (recent_avg_pause_time_ratio() < 0.0 ||
(recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
// Dump info to allow post-facto debugging
gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
gclog_or_tty->print_cr("-------------------------------------------");
gclog_or_tty->print_cr("Recent GC Times (ms):");
_recent_gc_times_ms->dump();
gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
_recent_prev_end_times_for_all_gcs_sec->dump();
gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
_recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
// In debug mode, terminate the JVM if the user wants to debug at this point.
assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
@ -1044,17 +1061,13 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
if (collector_state()->last_young_gc()) {
// This is supposed to be the "last young GC" before we start
||||
// doing mixed GCs. Here we decide whether to start mixed GCs or not.
|
||||
assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
|
||||
|
||||
if (!last_pause_included_initial_mark) {
|
||||
if (next_gc_should_be_mixed("start mixed GCs",
|
||||
"do not start mixed GCs")) {
|
||||
collector_state()->set_gcs_are_young(false);
|
||||
}
|
||||
} else {
|
||||
ergo_verbose0(ErgoMixedGCs,
|
||||
"do not start mixed GCs",
|
||||
ergo_format_reason("concurrent cycle is about to start"));
|
||||
if (next_gc_should_be_mixed("start mixed GCs",
|
||||
"do not start mixed GCs")) {
|
||||
collector_state()->set_gcs_are_young(false);
|
||||
}
|
||||
|
||||
collector_state()->set_last_young_gc(false);
|
||||
}
|
||||
|
||||
@ -1065,6 +1078,8 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
|
||||
if (!next_gc_should_be_mixed("continue mixed GCs",
|
||||
"do not continue mixed GCs")) {
|
||||
collector_state()->set_gcs_are_young(true);
|
||||
|
||||
maybe_start_marking();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1132,37 +1147,17 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
|
||||
}
|
||||
}
|
||||
|
||||
double all_other_time_ms = pause_time_ms -
|
||||
(average_time_ms(G1GCPhaseTimes::UpdateRS) + average_time_ms(G1GCPhaseTimes::ScanRS) +
|
||||
average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::Termination));
|
||||
|
||||
double young_other_time_ms = 0.0;
|
||||
if (young_cset_region_length() > 0) {
|
||||
young_other_time_ms =
|
||||
phase_times()->young_cset_choice_time_ms() +
|
||||
phase_times()->young_free_cset_time_ms();
|
||||
_young_other_cost_per_region_ms_seq->add(young_other_time_ms /
|
||||
(double) young_cset_region_length());
|
||||
_young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
|
||||
young_cset_region_length());
|
||||
}
|
||||
double non_young_other_time_ms = 0.0;
|
||||
|
||||
if (old_cset_region_length() > 0) {
|
||||
non_young_other_time_ms =
|
||||
phase_times()->non_young_cset_choice_time_ms() +
|
||||
phase_times()->non_young_free_cset_time_ms();
|
||||
|
||||
_non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
|
||||
(double) old_cset_region_length());
|
||||
_non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
|
||||
old_cset_region_length());
|
||||
}
|
||||
|
||||
double constant_other_time_ms = all_other_time_ms -
|
||||
(young_other_time_ms + non_young_other_time_ms);
|
||||
_constant_other_time_ms_seq->add(constant_other_time_ms);
|
||||
|
||||
double survival_ratio = 0.0;
|
||||
if (_collection_set_bytes_used_before > 0) {
|
||||
survival_ratio = (double) _bytes_copied_during_gc /
|
||||
(double) _collection_set_bytes_used_before;
|
||||
}
|
||||
_constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
|
||||
|
||||
_pending_cards_seq->add((double) _pending_cards);
|
||||
_rs_lengths_seq->add((double) _max_rs_lengths);
|
||||
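The *_cost_per_region_ms_seq updates above record a per-region average that later pause-time predictions multiply back out by a prospective collection-set size. A rough model of that bookkeeping (illustrative types, not the real HotSpot sequence class):

// Illustrative stand-in for the sequence type used above: a running
// average of the per-region "other" time observed at each pause.
struct CostSeqModel {
  double sum = 0.0;
  long   num = 0;
  void   add(double per_region_ms) { sum += per_region_ms; num += 1; }
  double avg() const { return num == 0 ? 0.0 : sum / num; }
};

// Predicted miscellaneous time for a candidate collection set: the stored
// per-region average scaled by the number of regions being considered.
double predict_other_time_ms(const CostSeqModel& seq, unsigned regions) {
  return seq.avg() * regions;
}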
@ -1271,6 +1266,10 @@ void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
gclog_or_tty->cr();
}

void G1CollectorPolicy::print_phases(double pause_time_sec) {
phase_times()->print(pause_time_sec);
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
double update_rs_processed_buffers,
double goal_ms) {
@ -1588,8 +1587,10 @@ void G1CollectorPolicy::update_survivors_policy() {
HeapRegion::GrainWords * _max_survivor_regions, counters());
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
GCCause::Cause gc_cause) {
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
// We actually check whether we are marking here and not if we are in a
// reclamation phase. This means that we will schedule a concurrent mark
// even while we are still in the process of reclaiming memory.
bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
if (!during_cycle) {
ergo_verbose1(ErgoConcCycles,
@ -1609,8 +1610,7 @@ bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
}
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
void G1CollectorPolicy::decide_on_conc_mark_initiation() {
// We are about to decide on whether this pause will be an
// initial-mark pause.

@ -1625,21 +1625,11 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// gone over the initiating threshold and we should start a
// concurrent marking cycle. So we might initiate one.

bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
if (!during_cycle) {
// The concurrent marking thread is not "during a cycle", i.e.,
// it has completed the last one. So we can go ahead and
// initiate a new cycle.
if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
// Initiate a new initial mark only if there is no marking or reclamation going
// on.

collector_state()->set_during_initial_mark_pause(true);
// We do not allow mixed GCs during marking.
if (!collector_state()->gcs_are_young()) {
collector_state()->set_gcs_are_young(true);
ergo_verbose0(ErgoMixedGCs,
"end mixed GCs",
ergo_format_reason("concurrent cycle is about to start"));
}

// And we can now clear initiate_conc_mark_if_possible() as
// we've already acted on it.
collector_state()->set_initiate_conc_mark_if_possible(false);
@ -1943,6 +1933,15 @@ double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const
return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
}

void G1CollectorPolicy::maybe_start_marking() {
if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
collector_state()->set_initiate_conc_mark_if_possible(true);
}
}

bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const {
CollectionSetChooser* cset_chooser = _collectionSetChooser;
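The new maybe_start_marking() above centralizes a pattern that used to be spread across the pause-end code: a pause never starts marking directly, it only raises a request flag that a later pause consumes. A condensed standalone model (plain C++, not the real HotSpot classes) of that flag protocol:

struct CollectorStateModel {
  bool initiate_conc_mark_if_possible = false;
  bool during_initial_mark_pause = false;
};

// End of a pause: only raise the request flag (maybe_start_marking()).
void maybe_start_marking_model(CollectorStateModel& s, bool need_conc_mark) {
  if (need_conc_mark) {
    // May already be true from an earlier, postponed request; setting it
    // again is harmless, which is what the "That's OK" note above means.
    s.initiate_conc_mark_if_possible = true;
  }
}

// Start of the next pause: consume the flag if nothing blocks a new cycle
// (decide_on_conc_mark_initiation()).
void decide_on_conc_mark_initiation_model(CollectorStateModel& s,
                                          bool cycle_or_mixed_in_progress) {
  if (s.initiate_conc_mark_if_possible && !cycle_or_mixed_in_progress) {
    s.during_initial_mark_pause = true;       // this pause does initial mark
    s.initiate_conc_mark_if_possible = false; // request has been acted on
  }
}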
@ -380,6 +380,11 @@ public:

protected:
virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
virtual double other_time_ms(double pause_time_ms) const;

double young_other_time_ms() const;
double non_young_other_time_ms() const;
double constant_other_time_ms(double pause_time_ms) const;

private:
// Statistics kept per GC stoppage, pause or full.
@ -529,6 +534,8 @@ private:
// as a percentage of the current heap capacity.
double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

// Sets up marking if proper conditions are met.
void maybe_start_marking();
public:

G1CollectorPolicy();
@ -549,6 +556,8 @@ public:

void init();

virtual void note_gc_start(uint num_active_workers);

// Create jstat counters for the policy.
virtual void initialize_gc_policy_counters();

@ -563,6 +572,8 @@ public:

bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

bool about_to_start_mixed_phase() const;

// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned);
@ -593,6 +604,8 @@ public:
void print_heap_transition() const;
void print_detailed_heap_transition(bool full = false) const;

virtual void print_phases(double pause_time_sec);

void record_stop_world_start();
void record_concurrent_pause();

@ -136,7 +136,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}

void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_active_gc_threads = active_gc_threads;
@ -362,6 +362,8 @@ class G1GCParPhasePrinter : public StackObj {
};

void G1GCPhaseTimes::print(double pause_time_sec) {
note_gc_end();

G1GCParPhasePrinter par_phase_printer(this);

if (_root_region_scan_wait_time_ms > 0.0) {

@ -121,10 +121,11 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
void print_stats(int level, const char* str, size_t value);
void print_stats(int level, const char* str, double value, uint workers);

void note_gc_end();

public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(uint active_gc_threads, bool mark_in_progress);
void note_gc_end();
void note_gc_start(uint active_gc_threads);
void print(double pause_time_sec);

// record the time a phase took in seconds
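The paired hunks above shrink the G1GCPhaseTimes surface: note_gc_start() loses its unused mark_in_progress argument, and note_gc_end() becomes a private step that print() performs itself, so callers can no longer forget the finalization. A minimal model of the narrowed lifecycle (plain C++, names illustrative):

class PhaseTimesModel {
 public:
  void note_gc_start(unsigned active_workers) { _workers = active_workers; }
  void print(double pause_time_sec) {
    note_gc_end();  // finalization can no longer be skipped by callers
    // ... emit per-phase lines for _workers threads over pause_time_sec ...
    (void)pause_time_sec;
  }
 private:
  void note_gc_end() { /* fold per-thread samples into totals */ }
  unsigned _workers = 0;
};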
@ -51,7 +51,6 @@ const char* G1HRPrinter::region_type_name(RegionType type) {
case Eden: return "Eden";
case Survivor: return "Survivor";
case Old: return "Old";
case SingleHumongous: return "SingleH";
case StartsHumongous: return "StartsH";
case ContinuesHumongous: return "ContinuesH";
case Archive: return "Archive";

@ -50,7 +50,6 @@ public:
Eden,
Survivor,
Old,
SingleHumongous,
StartsHumongous,
ContinuesHumongous,
Archive

@ -279,8 +279,8 @@ public:
} else {
assert(hr->is_empty(), "Should have been cleared in phase 2.");
}
hr->reset_during_compaction();
}
hr->reset_during_compaction();
} else if (!hr->is_pinned()) {
hr->compact();
}
@ -334,9 +334,6 @@ void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
HeapWord* end = hr->end();
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

assert(hr->is_starts_humongous(),
"Only the start of a humongous region should be freed.");

hr->set_containing_set(NULL);
_humongous_regions_removed.increment(1u, hr->capacity());

@ -373,15 +370,12 @@ void G1PrepareCompactClosure::update_sets() {

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
if (hr->is_humongous()) {
if (hr->is_starts_humongous()) {
oop obj = oop(hr->bottom());
if (obj->is_gc_marked()) {
obj->forward_to(obj);
} else {
free_humongous_region(hr);
}
} else {
assert(hr->is_continues_humongous(), "Invalid humongous.");
oop obj = oop(hr->humongous_start_region()->bottom());
if (hr->is_starts_humongous() && obj->is_gc_marked()) {
obj->forward_to(obj);
}
if (!obj->is_gc_marked()) {
free_humongous_region(hr);
}
} else if (!hr->is_pinned()) {
prepare_for_compaction(hr, hr->end());
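The rewritten doHeapRegion() above treats each humongous region independently: every one of them looks up the object at its start region's bottom, only the start region forwards the object to itself, and any region of a dead humongous object frees itself. A condensed decision model (plain C++, not HotSpot code):

struct RegionModel {
  bool humongous_start;  // true for the first region of the series
  bool object_marked;    // mark state of the object at the start region
};

enum class Action { ForwardSelf, Free, Nothing };

Action humongous_region_action(const RegionModel& r) {
  if (r.humongous_start && r.object_marked) return Action::ForwardSelf;
  if (!r.object_marked)                     return Action::Free;
  return Action::Nothing;  // live continues-humongous region: keep as-is
}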
@ -117,11 +117,6 @@ inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're looking at location "
"*" PTR_FORMAT " = " PTR_FORMAT,
_task->worker_id(), p2i(p), p2i((void*) obj));
}
_task->deal_with_reference(obj);
}

@ -227,7 +222,7 @@ inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
if (_g1->heap_region_containing(new_obj)->is_young()) {
_scanned_klass->record_modified_oops();
}
}

@ -216,7 +216,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop const old,
markOop const old_mark) {
const size_t word_sz = old->size();
HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
HeapRegion* const from_region = _g1h->heap_region_containing(old);
// +1 to make the -1 indexes valid...
const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
@ -294,9 +294,9 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (G1StringDedup::is_enabled()) {
const bool is_from_young = state.is_young();
const bool is_to_young = dest_state.is_young();
assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
"sanity");
assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
"sanity");
G1StringDedup::enqueue_from_evacuation(is_from_young,
is_to_young,
@ -314,7 +314,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop* old_p = set_partial_array_mask(old);
push_on_queue(old_p);
} else {
HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
_scanner.set_region(to_region);
obj->oop_iterate_backwards(&_scanner);
}
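All the heap_region_containing_raw() calls above become plain heap_region_containing(): once no region's _end is stretched over a whole humongous object, a single address-to-region mapping works for every heap address, and it is just a shift off the heap base. A sketch of that arithmetic (plain C++; the constants are illustrative, not G1's actual grain size):

#include <cstddef>
#include <cstdint>

constexpr std::size_t kLogRegionBytes = 20;  // e.g. 1 MiB regions

// Map an address to its region index: one subtraction and one shift,
// valid for any address inside the reserved heap.
std::size_t region_index_for(std::uintptr_t heap_base, std::uintptr_t addr) {
  return (addr - heap_base) >> kLogRegionBytes;
}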
@ -101,7 +101,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
// so that the heap remains parsable in case of evacuation failure.
to_obj_array->set_length(end);
}
_scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
_scanner.set_region(_g1h->heap_region_containing(to_obj));
// Process indexes [start,end). It will also process the header
// along with the first chunk (i.e., the chunk with start == 0).
// Note that at this point the length field of to_obj_array is not
@ -115,10 +115,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
if (!has_partial_array_mask(ref_to_scan)) {
// Note: we can use "raw" versions of "region_containing" because
// "obj_to_scan" is definitely in the heap, and is not in a
// humongous region.
HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
HeapRegion* r = _g1h->heap_region_containing(ref_to_scan);
do_oop_evac(ref_to_scan, r);
} else {
do_oop_partial_array((oop*)ref_to_scan);

@ -40,42 +40,13 @@
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
if (ct_freq == NULL) {
ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
ct_freq = new jbyte[ct_freq_sz];
for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
}
}

void ct_freq_note_card(size_t index) {
assert(0 <= index && index < ct_freq_sz, "Bounds error.");
if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
for (size_t j = 0; j < ct_freq_sz; j++) {
card_repeat_count.add_entry(ct_freq[j]);
ct_freq[j] = 0;
}

}
#endif

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
: _g1(g1), _conc_refine_cards(0),
_ct_bs(ct_bs), _g1p(_g1->g1_policy()),
_cg1r(g1->concurrent_g1_refine()),
_cset_rs_update_cl(NULL),
_prev_period_summary()
_prev_period_summary(),
_into_cset_dirty_card_queue_set(false)
{
_cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); i++) {
@ -84,6 +55,15 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
if (G1SummarizeRSetStats) {
_prev_period_summary.initialize(this);
}
// Initialize the card queue set used to hold cards containing
// references into the collection set.
_into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
-1, // never trigger processing
-1, // no limit on length
Shared_DirtyCardQ_lock,
&JavaThread::dirty_card_queue_set());
}

G1RemSet::~G1RemSet() {
@ -272,7 +252,7 @@ public:
if (_g1rs->refine_card(card_ptr, worker_i, true)) {
// 'card_ptr' contains references that point into the collection
// set. We need to record the card in the DCQS
// (G1CollectedHeap::into_cset_dirty_card_queue_set())
// (_into_cset_dirty_card_queue_set)
// that's used for that purpose.
//
// Enqueue the card
@ -302,10 +282,6 @@ void G1RemSet::cleanupHRRS() {
size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* heap_region_codeblobs,
uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
#endif

// We cache the value of 'oc' closure into the appropriate slot in the
// _cset_rs_update_cl for this worker
assert(worker_i < n_workers(), "sanity");
@ -320,7 +296,7 @@ size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
// are wholly 'free' of live objects. In the event of an evacuation
// failure the cards/buffers in this queue set are passed to the
// DirtyCardQueueSet that is used to manage RSet updates
DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);

updateRS(&into_cset_dcq, worker_i);
size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);
@ -343,7 +319,7 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
// Set all cards back to clean.
_g1->cleanUpCardTable();

DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;
int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

if (_g1->evacuation_failed()) {
@ -359,10 +335,10 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {

// Free any completed buffers in the DirtyCardQueueSet used to hold cards
// which contain references that point into the collection.
_g1->into_cset_dirty_card_queue_set().clear();
assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
_into_cset_dirty_card_queue_set.clear();
assert(_into_cset_dirty_card_queue_set.completed_buffers_num() == 0,
"all buffers should be freed");
_g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
_into_cset_dirty_card_queue_set.clear_n_completed_buffers();
}

class ScrubRSClosure: public HeapRegionClosure {
@ -498,11 +474,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
HeapWord* end = start + CardTableModRefBS::card_size_in_words;
MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
init_ct_freq_table(_g1->max_capacity());
ct_freq_note_card(_ct_bs->index_for(start));
#endif

G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
if (check_for_refs_into_cset) {
// ConcurrentG1RefineThreads have worker numbers larger than what
@ -607,12 +578,6 @@ void G1RemSet::print_summary_info(G1RemSetSummary * summary, const char * header
gclog_or_tty->print_cr("%s", header);
}

#if CARD_REPEAT_HISTO
gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
card_repeat_count.print_on(gclog_or_tty);
#endif

summary->print_on(gclog_or_tty);
}

@ -631,9 +596,9 @@ void G1RemSet::prepare_for_verify() {
bool use_hot_card_cache = hot_card_cache->use_cache();
hot_card_cache->set_use_cache(false);

DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
updateRS(&into_cset_dcq, 0);
_g1->into_cset_dirty_card_queue_set().clear();
_into_cset_dirty_card_queue_set.clear();

hot_card_cache->set_use_cache(use_hot_card_cache);
assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
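The hunks above move _into_cset_dirty_card_queue_set from G1CollectedHeap into G1RemSet, so the queue set is constructed, used, and cleared by its only consumer instead of being reached through the heap object. A schematic sketch (illustrative types, not the real classes) of that ownership shape:

#include <vector>

// Stand-in for a dirty card queue set; the real class has the lock and
// free-list plumbing that the constructor hunk above wires up.
struct CardQueueSetModel {
  std::vector<const void*> completed_buffers;
  void clear() { completed_buffers.clear(); }
};

class RemSetModel {
 public:
  void cleanup_after_pause() {
    // Previously reached via the heap: g1->into_cset_dirty_card_queue_set().
    _into_cset_dirty_card_queue_set.clear();
  }
 private:
  CardQueueSetModel _into_cset_dirty_card_queue_set;  // sole owner and user
};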
@ -41,6 +41,13 @@ class G1ParPushHeapRSClosure;
class G1RemSet: public CHeapObj<mtGC> {
private:
G1RemSetSummary _prev_period_summary;

// A DirtyCardQueueSet that is used to hold cards that contain
// references into the current collection set. This is used to
// update the remembered sets of the regions in the collection
// set in the event of an evacuation failure.
DirtyCardQueueSet _into_cset_dirty_card_queue_set;

protected:
G1CollectedHeap* _g1;
size_t _conc_refine_cards;

@ -60,7 +60,7 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

assert(from == NULL || from->is_in_reserved(p), "p is not in from");
assert(from->is_in_reserved(p) || from->is_starts_humongous(), "p is not in from");

HeapRegion* to = _g1->heap_region_containing(obj);
if (from != to) {

@ -26,7 +26,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbQueue.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
@ -188,21 +188,6 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
}
}

void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
oop new_val) {
uintptr_t field_uint = (uintptr_t)field;
uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
uintptr_t comb = field_uint ^ new_val_uint;
comb = comb >> HeapRegion::LogOfHRGrainBytes;
if (comb == 0) return;
if (new_val == NULL) return;
// Otherwise, log it.
G1SATBCardTableLoggingModRefBS* g1_bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
g1_bs->write_ref_field_work(field, new_val, false);
}

void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
volatile jbyte* byte = byte_for(mr.start());

@ -56,21 +56,15 @@ public:

virtual bool has_write_ref_pre_barrier() { return true; }

// This notes that we don't need to access any BarrierSet data
// structures, so this can be called from a static context.
template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
T heap_oop = oopDesc::load_heap_oop(field);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop(heap_oop));
}
}

// We export this to make it available in cases where the static
// type of the barrier set is known. Note that it is non-virtual.
template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
write_ref_field_pre_static(field, newVal);
}

// These are the more general virtual versions.
virtual void write_ref_field_pre_work(oop* field, oop new_val) {
inline_write_ref_field_pre(field, new_val);
@ -173,9 +167,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {

virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

// Can be called from static contexts.
static void write_ref_field_static(void* field, oop new_val);

// NB: if you do a whole-heap invalidation, the "usual invariant" defined
// above no longer applies.
void invalidate(MemRegion mr, bool whole_heap = false);
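The header hunk above folds the old write_ref_field_pre_static() into inline_write_ref_field_pre(): the SATB pre-barrier reads the field's old value and enqueues it before the store, so concurrent marking keeps seeing the snapshot taken at the start of the cycle. A minimal standalone model (plain C++; a std::vector stands in for the SATB queue, and Oop is assumed to be a raw pointer type):

#include <vector>

template <class Oop>
void satb_pre_barrier(Oop* field, std::vector<Oop>& satb_queue) {
  Oop old_val = *field;
  if (old_val != nullptr) {
    satb_queue.push_back(old_val);  // keep the overwritten reference visible
  }
}

template <class Oop>
void write_ref_field(Oop* field, Oop new_val, std::vector<Oop>& satb_queue) {
  satb_pre_barrier(field, satb_queue);  // pre-barrier runs before the store
  *field = new_val;
}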
@ -52,7 +52,7 @@ void G1StringDedup::stop() {

bool G1StringDedup::is_candidate_from_mark(oop obj) {
if (java_lang_String::is_instance_inlined(obj)) {
bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
bool from_young = G1CollectedHeap::heap()->heap_region_containing(obj)->is_young();
if (from_young && obj->age() < StringDeduplicationAgeThreshold) {
// Candidate found. String is being evacuated from young to old but has not
// reached the deduplication age threshold, i.e. has not previously been a

@ -48,9 +48,6 @@
develop(bool, G1TraceMarkStackOverflow, false, \
"If true, extra debugging code for CM restart for ovflw.") \
\
develop(bool, G1TraceHeapRegionRememberedSet, false, \
"Enables heap region remembered set debug logs") \
\
diagnostic(bool, G1SummarizeConcMark, false, \
"Summarize concurrent mark info") \
\
@ -187,12 +184,6 @@
range(0, max_jint/wordSize) \
constraint(G1RSetSparseRegionEntriesConstraintFunc,AfterErgo) \
\
develop(bool, G1RecordHRRSOops, false, \
"When true, record recent calls to rem set operations.") \
\
develop(bool, G1RecordHRRSEvents, false, \
"When true, record recent calls to rem set operations.") \
\
develop(intx, G1MaxVerifyFailures, -1, \
"The maximum number of verification failures to print. " \
"-1 means print all.") \
@ -228,10 +219,6 @@
develop(bool, G1HRRSFlushLogBuffersOnVerify, false, \
"Forces flushing of log buffers before verification.") \
\
develop(bool, G1FailOnFPError, false, \
"When set, G1 will fail when it encounters an FP 'error', " \
"so as to allow debugging") \
\
product(size_t, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
range(0, 32*M) \

@ -67,7 +67,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
if (!g1h->is_obj_dead(oop(cur), _hr)) {
if (!g1h->is_obj_dead(oop(cur))) {
oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
} else {
oop_size = _hr->block_size(cur);
@ -81,7 +81,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* next_obj = cur + oop_size;
while (next_obj < top) {
// Keep filtering the remembered set.
if (!g1h->is_obj_dead(cur_oop, _hr)) {
if (!g1h->is_obj_dead(cur_oop)) {
// Bottom lies entirely below top, so we can call the
// non-memRegion version of oop_iterate below.
cur_oop->oop_iterate(_rs_scan);
@ -93,7 +93,7 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
}

// Last object. Need to do dead-obj filtering here too.
if (!g1h->is_obj_dead(oop(cur), _hr)) {
if (!g1h->is_obj_dead(oop(cur))) {
oop(cur)->oop_iterate(_rs_scan, mr);
}
}
@ -162,8 +162,6 @@ void HeapRegion::reset_after_compaction() {
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions");
assert(_end == orig_end(),
"we should have already filtered out humongous regions");
assert(!in_collection_set(),
"Should not clear heap region %u in the collection set", hrm_index());

@ -213,24 +211,18 @@ void HeapRegion::calc_gc_efficiency() {
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
void HeapRegion::set_starts_humongous(HeapWord* obj_top) {
assert(!is_humongous(), "sanity / pre-condition");
assert(end() == orig_end(),
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

_type.set_starts_humongous();
_humongous_start_region = this;

set_end(new_end);
_offsets.set_for_starts_humongous(new_top);
_offsets.set_for_starts_humongous(obj_top);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
assert(!is_humongous(), "sanity / pre-condition");
assert(end() == orig_end(),
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(first_hr->is_starts_humongous(), "pre-condition");

@ -241,18 +233,6 @@ void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
void HeapRegion::clear_humongous() {
assert(is_humongous(), "pre-condition");

if (is_starts_humongous()) {
assert(top() <= end(), "pre-condition");
set_end(orig_end());
if (top() > end()) {
// at least one "continues humongous" region after it
set_top(end());
}
} else {
// continues humongous
assert(end() == orig_end(), "sanity");
}

assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
_humongous_start_region = NULL;
}
@ -290,11 +270,6 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
record_timestamp();

assert(mr.end() == orig_end(),
"Given region end address " PTR_FORMAT " should match exactly "
"bottom plus one region size, i.e. " PTR_FORMAT,
p2i(mr.end()), p2i(orig_end()));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
@ -832,7 +807,14 @@ void HeapRegion::verify(VerifyOption vo,
_offsets.verify();
}

if (p != top()) {
if (is_region_humongous) {
oop obj = oop(this->humongous_start_region()->bottom());
if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
gclog_or_tty->print_cr("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
}
}

if (!is_region_humongous && p != top()) {
gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
"does not match top " PTR_FORMAT, p2i(p), p2i(top()));
*failures = true;
@ -840,7 +822,6 @@ void HeapRegion::verify(VerifyOption vo,
}

HeapWord* the_end = end();
assert(p == top(), "it should still hold");
// Do some extra BOT consistency checking for addresses in the
// range [top, end). BOT look-ups in this range should yield
// top. No point in doing that if top == end (there's nothing there).
@ -931,6 +912,7 @@ void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
assert(new_end == _bottom + HeapRegion::GrainWords, "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
Space::set_end(new_end);
_offsets.resize(new_end - bottom());
}

@ -43,6 +43,15 @@
// The solution is to remove this method from the definition
// of a Space.

// Each heap region is self contained. top() and end() can never
// be set beyond the end of the region. For humongous objects,
// the first region is a StartsHumongous region. If the humongous
// object is larger than a heap region, the following regions will
// be of type ContinuesHumongous. In this case the top() of the
// StartHumongous region and all ContinuesHumongous regions except
// the last will point to their own end. For the last ContinuesHumongous
// region, top() will equal the object's top.

class G1CollectedHeap;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
@ -389,8 +398,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
size_t garbage_bytes() {
size_t used_at_mark_start_bytes =
(prev_top_at_mark_start() - bottom()) * HeapWordSize;
assert(used_at_mark_start_bytes >= marked_bytes(),
"Can't mark more than we have.");
return used_at_mark_start_bytes - marked_bytes();
}

@ -409,7 +416,6 @@ class HeapRegion: public G1OffsetTableContigSpace {

void add_to_marked_bytes(size_t incr_bytes) {
_next_marked_bytes = _next_marked_bytes + incr_bytes;
assert(_next_marked_bytes <= used(), "invariant" );
}

void zero_marked_bytes() {
@ -445,57 +451,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
return _humongous_start_region;
}

// Return the number of distinct regions that are covered by this region:
// 1 if the region is not humongous, >= 1 if the region is humongous.
uint region_num() const {
if (!is_humongous()) {
return 1U;
} else {
assert(is_starts_humongous(), "doesn't make sense on HC regions");
assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
}
}

// Return the index + 1 of the last HC regions that's associated
// with this HS region.
uint last_hc_index() const {
assert(is_starts_humongous(), "don't call this otherwise");
return hrm_index() + region_num();
}

// Same as Space::is_in_reserved, but will use the original size of the region.
// The original size is different only for start humongous regions. They get
// their _end set up to be the end of the last continues region of the
// corresponding humongous object.
bool is_in_reserved_raw(const void* p) const {
return _bottom <= p && p < orig_end();
}

// Makes the current region be a "starts humongous" region, i.e.,
// the first region in a series of one or more contiguous regions
// that will contain a single "humongous" object. The two parameters
// are as follows:
// that will contain a single "humongous" object.
//
// new_top : The new value of the top field of this region which
// points to the end of the humongous object that's being
// allocated. If there is more than one region in the series, top
// will lie beyond this region's original end field and on the last
// region in the series.
//
// new_end : The new value of the end field of this region which
// points to the end of the last region in the series. If there is
// one region in the series (namely: this one) end will be the same
// as the original end of this region.
//
// Updating top and end as described above makes this region look as
// if it spans the entire space taken up by all the regions in the
// series and a single allocation moved its top to new_top. This
// ensures that the space (capacity / allocated) taken up by all
// humongous regions can be calculated by just looking at the
// "starts humongous" regions and by ignoring the "continues
// humongous" regions.
void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
// obj_top : points to the end of the humongous object that's being
// allocated.
void set_starts_humongous(HeapWord* obj_top);

// Makes the current region be a "continues humongous'
// region. first_hr is the "start humongous" region of the series
@ -566,9 +528,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

// For the start region of a humongous sequence, it's original end().
HeapWord* orig_end() const { return _bottom + GrainWords; }

// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space, bool locked = false);
void par_clear();
@ -614,8 +573,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool is_marked() { return _prev_top_at_mark_start != bottom(); }

void reset_during_compaction() {
assert(is_starts_humongous(),
"should only be called for starts humongous regions");
assert(is_humongous(),
"should only be called for humongous regions");

zero_marked_bytes();
init_top_at_mark_start();
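The new header comment above pins down the invariant that replaces orig_end(): every region spans exactly one grain, and a humongous object simply covers several whole regions. Under that invariant the region count for a humongous allocation is plain round-up arithmetic, as in this sketch (plain C++; sizes illustrative):

#include <cstddef>

constexpr std::size_t kGrainWords = 1u << 17;  // words per region (example)

// Number of whole regions needed for a humongous allocation of
// word_size words: round up to the next grain boundary.
std::size_t regions_for_humongous(std::size_t word_size) {
  return (word_size + kGrainWords - 1) / kGrainWords;
}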
@ -115,6 +115,11 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
G1CollectedHeap* g1h = G1CollectedHeap::heap();

if (!this->is_in(p)) {
assert(is_continues_humongous(), "This case can only happen for humongous regions");
return (p == humongous_start_region()->bottom());
}
if (ClassUnloadingWithConcurrentMark) {
return !g1h->is_obj_dead(oop(p), this);
}
@ -176,10 +181,6 @@ inline void HeapRegion::note_end_of_marking() {
_prev_top_at_mark_start = _next_top_at_mark_start;
_prev_marked_bytes = _next_marked_bytes;
_next_marked_bytes = 0;

assert(_prev_marked_bytes <=
(size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
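The new block_is_obj() case above relies on the same invariant: an address outside the receiver can only be legitimate for a continues-humongous region, and then only if it is the bottom of the start region. A condensed model (plain C++, illustrative types):

struct HumongousRegionModel {
  const char* bottom;
  const char* end;  // always bottom + one grain under the new invariant
  const HumongousRegionModel* humongous_start;

  bool is_in(const char* p) const { return bottom <= p && p < end; }

  bool block_is_obj(const char* p) const {
    if (!is_in(p)) {
      // Only legal for continues-humongous regions, and only for the one
      // object of the series, which starts at the start region's bottom.
      return p == humongous_start->bottom;
    }
    return true;  // simplified: the real code also consults liveness info
  }
};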
@ -343,63 +343,18 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
continue;
}
HeapRegion* r = _regions.get_by_index(index);
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
// We'll ignore regions already claimed.
// However, if the iteration is specified as concurrent, the values for
// is_starts_humongous and is_continues_humongous can not be trusted,
// and we should just blindly iterate over regions regardless of their
// humongous status.
if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
if (hrclaimer->is_region_claimed(index)) {
continue;
}
// OK, try to claim it
if (!hrclaimer->claim_region(index)) {
continue;
}
// Success!
// As mentioned above, special treatment of humongous regions can only be
// done if we are iterating non-concurrently.
if (!concurrent && r->is_starts_humongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
// closure on the "starts humongous" region might de-allocate
// and clear all its "continues humongous" regions and, as a
// result, we might end up processing them twice. So, we'll do
// them first (note: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region.
for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
HeapRegion* chr = _regions.get_by_index(ch_index);

assert(chr->is_continues_humongous(), "Must be humongous region");
assert(chr->humongous_start_region() == r,
"Must work on humongous continuation of the original start region "
PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr));
assert(!hrclaimer->is_region_claimed(ch_index),
"Must not have been claimed yet because claiming of humongous continuation first claims the start region");

// Claim the region so no other worker tries to process the region. When a worker processes a
// starts_humongous region it may also process the associated continues_humongous regions.
// The continues_humongous regions can be changed to free regions. Unless this worker claims
// all of these regions, other workers might try claim and process these newly free regions.
bool claim_result = hrclaimer->claim_region(ch_index);
guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");

bool res2 = blk->doHeapRegion(chr);
if (res2) {
return;
}

// Right now, this holds (i.e., no closure that actually
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
assert(chr->is_continues_humongous(), "should still be the case");
assert(chr->humongous_start_region() == r, "sanity");
}
}

bool res = blk->doHeapRegion(r);
if (res) {
return;
@ -508,11 +463,7 @@ void HeapRegionManager::verify() {
// this method may be called, we have only completed allocation of the regions,
// but not put into a region set.
prev_committed = true;
if (hr->is_starts_humongous()) {
prev_end = hr->orig_end();
} else {
prev_end = hr->end();
}
prev_end = hr->end();
}
for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
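With humongous regions self-contained, the parallel iteration above collapses to claim-then-visit. A minimal standalone sketch of such a claim loop using one atomic flag per region (the real HRClaimer and the worker-specific start offset are elided):

#include <atomic>
#include <cstddef>
#include <vector>

// claimed[i] is this iteration's claim flag for region i; do_region returns
// true to abort the whole iteration, matching doHeapRegion() above.
void par_iterate_model(std::vector<std::atomic<bool>>& claimed,
                       bool (*do_region)(std::size_t)) {
  for (std::size_t i = 0; i < claimed.size(); ++i) {
    bool expected = false;
    if (!claimed[i].compare_exchange_strong(expected, true)) {
      continue;  // some worker (maybe this one) already claimed region i
    }
    if (do_region(i)) {
      return;
    }
  }
}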
@ -150,6 +150,10 @@ public:
// is valid.
inline HeapRegion* at(uint index) const;

// Return the next region (by index) that is part of the same
// humongous object that hr is part of.
inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

// If addr is within the committed space return its corresponding
// HeapRegion, otherwise return NULL.
inline HeapRegion* addr_to_region(HeapWord* addr) const;
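next_region_in_humongous() above replaces the removed region_num()/last_hc_index() arithmetic: instead of precomputing a count from a stretched capacity, callers walk forward while the next region still belongs to the same start region. A sketch of that walk (plain C++, illustrative types):

#include <cstddef>
#include <vector>

struct HRModel {
  const HRModel* humongous_start;  // == &self for a starts-humongous region
};

// Walk to the next region of the same humongous object, or null when the
// series ends; callers loop instead of precomputing a region count.
const HRModel* next_region_in_humongous_model(
    const std::vector<HRModel>& table, std::size_t index) {
  const HRModel* start = table[index].humongous_start;
  std::size_t next = index + 1;
  if (next < table.size() && table[next].humongous_start == start) {
    return &table[next];
  }
  return nullptr;
}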