J. Duke 2017-07-05 19:14:54 +02:00
commit 9212c2d707
199 changed files with 3253 additions and 1804 deletions

View File

@ -231,3 +231,4 @@ b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
0874bb4707b723d5bb108d379c557cf41529d1a7 jdk8-b107
9286a6e61291246d88af713f1ef79adeea30fe2e jdk8-b108
91f47e8da5c60de58ed195e9b57f3bf192a18f83 jdk8-b109
4faa09c7fe555de086dd9048d3c5cc92317d6f45 jdk8-b110

View File

@ -66,7 +66,7 @@ else
else
# We are building multiple configurations.
# First, find out the valid targets
# Run the makefile with an arbitraty SPEC using -p -q (quiet dry-run and dump rules) to find
# Run the makefile with an arbitrary SPEC using -p -q (quiet dry-run and dump rules) to find
# available PHONY targets. Use this list as valid targets to pass on to the repeated calls.
all_phony_targets=$(filter-out $(global_targets) bundles-only, $(strip $(shell \
$(MAKE) -p -q -f common/makefiles/Main.gmk FRC SPEC=$(firstword $(SPEC)) | \

View File

@ -1016,8 +1016,8 @@ with_cacerts_file
enable_unlimited_crypto
with_milestone
with_update_version
with_build_number
with_user_release_suffix
with_build_number
with_boot_jdk
with_boot_jdk_jvmargs
with_add_source_root
@ -1755,10 +1755,10 @@ Optional Packages:
--with-cacerts-file specify alternative cacerts file
--with-milestone Set milestone value for build [internal]
--with-update-version Set update version value for build [b00]
--with-build-number Set build number value for build [b00]
--with-user-release-suffix
Add a custom string to the version string if build
number isn't set.[username_builddateb00]
--with-build-number Set build number value for build [b00]
--with-boot-jdk path to Boot JDK (used to bootstrap build) [probed]
--with-boot-jdk-jvmargs specify JVM arguments to be passed to all
invocations of the Boot JDK, overriding the default
@ -3818,7 +3818,7 @@ fi
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1379504921
DATE_WHEN_GENERATED=1381162713
###############################################################################
#
@ -10935,7 +10935,7 @@ BUILD_HEADLESS="BUILD_HEADLESS:=true"
if test "x$SUPPORT_HEADFUL" = xyes; then
# We are building both headful and headless.
headful_msg="inlude support for both headful and headless"
headful_msg="include support for both headful and headless"
fi
if test "x$SUPPORT_HEADFUL" = xno; then
@ -11048,6 +11048,18 @@ elif test "x$with_update_version" != x; then
fi
# Check whether --with-user-release-suffix was given.
if test "${with_user_release_suffix+set}" = set; then :
withval=$with_user_release_suffix;
fi
if test "x$with_user_release_suffix" = xyes; then
as_fn_error $? "Release suffix must have a value" "$LINENO" 5
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
fi
# Check whether --with-build-number was given.
if test "${with_build_number+set}" = set; then :
withval=$with_build_number;
@ -11058,28 +11070,17 @@ if test "x$with_build_number" = xyes; then
elif test "x$with_build_number" != x; then
JDK_BUILD_NUMBER="$with_build_number"
fi
# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
if test "x$JDK_BUILD_NUMBER" = x; then
JDK_BUILD_NUMBER=b00
if test "x$USER_RELEASE_SUFFIX" = x; then
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
fi
# Check whether --with-user-release-suffix was given.
if test "${with_user_release_suffix+set}" = set; then :
withval=$with_user_release_suffix;
fi
if test "x$with_user_release_suffix" = xyes; then
as_fn_error $? "Release suffix must have a value" "$LINENO" 5
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
else
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
# Now set the JDK version, milestone, build number etc.
@ -11095,6 +11096,7 @@ fi
COPYRIGHT_YEAR=`date +'%Y'`

View File

@ -316,7 +316,7 @@ BUILD_HEADLESS="BUILD_HEADLESS:=true"
if test "x$SUPPORT_HEADFUL" = xyes; then
# We are building both headful and headless.
headful_msg="inlude support for both headful and headless"
headful_msg="include support for both headful and headless"
fi
if test "x$SUPPORT_HEADFUL" = xno; then
@ -426,6 +426,14 @@ elif test "x$with_update_version" != x; then
JDK_UPDATE_VERSION="$with_update_version"
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
fi
AC_ARG_WITH(build-number, [AS_HELP_STRING([--with-build-number],
[Set build number value for build @<:@b00@:>@])])
if test "x$with_build_number" = xyes; then
@ -433,25 +441,19 @@ if test "x$with_build_number" = xyes; then
elif test "x$with_build_number" != x; then
JDK_BUILD_NUMBER="$with_build_number"
fi
# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
if test "x$JDK_BUILD_NUMBER" = x; then
JDK_BUILD_NUMBER=b00
if test "x$USER_RELEASE_SUFFIX" = x; then
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
fi
AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix],
[Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
if test "x$with_user_release_suffix" = xyes; then
AC_MSG_ERROR([Release suffix must have a value])
elif test "x$with_user_release_suffix" != x; then
USER_RELEASE_SUFFIX="$with_user_release_suffix"
else
BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
# Avoid [:alnum:] since it depends on the locale.
CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
fi
AC_SUBST(USER_RELEASE_SUFFIX)
# Now set the JDK version, milestone, build number etc.
AC_SUBST(USER_RELEASE_SUFFIX)
AC_SUBST(JDK_MAJOR_VERSION)
AC_SUBST(JDK_MINOR_VERSION)
AC_SUBST(JDK_MICRO_VERSION)

View File

@ -161,6 +161,7 @@ JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
COMPANY_NAME:=@COMPANY_NAME@
MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
# Different version strings generated from the above information.
JDK_VERSION:=@JDK_VERSION@
@ -173,8 +174,8 @@ ifeq ($(MILESTONE),)
else
RELEASE=$(JDK_VERSION)-$(MILESTONE)$(BUILD_VARIANT_RELEASE)
endif
ifeq ($(JDK_BUILD_NUMBER),b00)
USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
ifneq ($(USER_RELEASE_SUFFIX),)
FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER)
else
FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER)

View File

@ -159,7 +159,7 @@ define SetupArchive
endif
endif
# Utility macros, to make the shell script receipt somewhat easier to dechipher.
# Utility macros, to make the shell script receipt somewhat easier to decipher.
# The capture contents macro finds all files (matching the patterns, typically
# .class and .prp) that are newer than the jar-file, ie the new content to be put into the jar.
@ -520,7 +520,7 @@ define SetupJavaCompilation
# Using plain javac to batch compile everything.
$1 := $$($1_ALL_COPY_TARGETS) $$($1_ALL_COPY_CLEAN_TARGETS) $$($1_BIN)/_the.batch
# When buliding in batch, put headers in a temp dir to filter out those that actually
# When building in batch, put headers in a temp dir to filter out those that actually
# changed before copying them to the real header dir.
ifneq (,$$($1_HEADERS))
$1_HEADERS_ARG := -h $$($1_HEADERS).tmp

View File

@ -231,3 +231,4 @@ d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
23fc34133152692b725db4bd617b4c8dfd6ccb05 jdk8-b107
a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
428428cf5e06163322144cfb5367e1faa86acf20 jdk8-b109
3d2b7ce93c5c2e3db748f29c3d29620a8b3b748a jdk8-b110

View File

@ -381,3 +381,5 @@ a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
562a3d356de67670b4172b82aca2d30743449e04 hs25-b53

View File

@ -29,6 +29,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
@ -80,7 +81,7 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_in
(JNIEnv *env, jclass cls) {
jclass listClass;
if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) {
if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) {
THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc");
}

View File

@ -27,6 +27,8 @@
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ptrace.h>
#include "libproc_impl.h"

View File

@ -25,6 +25,7 @@
#include "salibelf.h"
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
extern void print_debug(const char*,...);

View File

@ -305,7 +305,7 @@ static struct symtab* build_symtab_from_build_id(Elf64_Nhdr *note)
unsigned char *bytes
= (unsigned char*)(note+1) + note->n_namesz;
unsigned char *filename
char *filename
= (build_id_to_debug_filename (note->n_descsz, bytes));
fd = pathmap_open(filename);

View File

@ -134,15 +134,13 @@ public class VM {
private String type;
private String name;
private Address addr;
private String kind;
private int origin;
private int flags;
private Flag(String type, String name, Address addr, String kind, int origin) {
private Flag(String type, String name, Address addr, int flags) {
this.type = type;
this.name = name;
this.addr = addr;
this.kind = kind;
this.origin = origin;
this.flags = flags;
}
public String getType() {
@ -157,12 +155,8 @@ public class VM {
return addr;
}
public String getKind() {
return kind;
}
public int getOrigin() {
return origin;
return flags & 0xF; // XXX can we get the mask bits from somewhere?
}
public boolean isBool() {
@ -173,8 +167,7 @@ public class VM {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isBool(), "not a bool flag!");
}
return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned())
!= 0;
return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
}
public boolean isIntx() {
@ -843,11 +836,10 @@ public class VM {
Address flagAddr = flagType.getAddressField("flags").getValue();
AddressField typeFld = flagType.getAddressField("type");
AddressField nameFld = flagType.getAddressField("name");
AddressField addrFld = flagType.getAddressField("addr");
AddressField kindFld = flagType.getAddressField("kind");
CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
AddressField typeFld = flagType.getAddressField("_type");
AddressField nameFld = flagType.getAddressField("_name");
AddressField addrFld = flagType.getAddressField("_addr");
CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0);
long flagSize = flagType.getSize(); // sizeof(Flag)
@ -856,9 +848,8 @@ public class VM {
String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
Address addr = addrFld.getValue(flagAddr);
String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
int origin = (int)originFld.getValue(flagAddr);
commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
int flags = (int)flagsFld.getValue(flagAddr);
commandLineFlags[f] = new Flag(type, name, addr, flags);
flagAddr = flagAddr.addOffsetTo(flagSize);
}
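For context, the serviceability agent now reads HotSpot's packed _flags field and recovers the origin by masking the low four bits, as the getOrigin() change above shows. A minimal, hypothetical Java sketch of that masking idiom (the example value and the meaning of the upper bits are assumptions, not taken from this changeset):

    // Hypothetical illustration of extracting the origin from a packed flags int.
    int flags = 0x23;              // example packed value, not a real HotSpot flag
    int origin = flags & 0xF;      // low 4 bits hold the origin, matching the mask above
    int upperBits = flags >>> 4;   // remaining bits carry other attributes (assumed)
    System.out.println(origin + " " + upperBits);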

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
VERSION = fastdebug
SYSDEFS += -DASSERT
SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
PICFLAGS = DEFAULT

View File

@ -247,7 +247,7 @@ endif
ifeq ($(USE_CLANG), true)
# However we need to clean the code up before we can unrestrictedly enable this option with Clang
WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
# Not yet supported by clang in Xcode 4.6.2
# WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
@ -262,7 +262,7 @@ ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \&
# conversions which might affect the values. Only enable it in earlier versions.
WARNING_FLAGS = -Wunused-function
ifeq ($(USE_CLANG),)
WARNINGS_FLAGS += -Wconversion
WARNING_FLAGS += -Wconversion
endif
endif

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=52
HS_BUILD_NUMBER=53
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
VERSION = optimized
SYSDEFS += -DASSERT
SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
PICFLAGS = DEFAULT

View File

@ -208,7 +208,7 @@ WARNINGS_ARE_ERRORS = -Werror
ifeq ($(USE_CLANG), true)
# However we need to clean the code up before we can unrestrictedly enable this option with Clang
WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ default:: $(BUILD_PCH_FILE) $(AOUT) checkAndBuildSA
!include ../local.make
!include compile.make
CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS"
!include $(WorkSpace)/make/windows/makefiles/vm.make
!include local.make

View File

@ -764,7 +764,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
#ifdef CC_INTERP
*oop_result = istate->_oop_temp;
#else
oop obj = (oop) at(interpreter_frame_oop_temp_offset);
oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
*oop_result = obj;
#endif // CC_INTERP
@ -788,7 +788,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
switch(type) {
case T_OBJECT:
case T_ARRAY: {
oop obj = (oop)*tos_addr;
oop obj = cast_to_oop(*tos_addr);
assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
*oop_result = obj;
break;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -121,6 +121,7 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
bool for_compiler_entry) {
Label L_no_such_method;
assert(method == G5_method, "interpreter calling convention");
assert_different_registers(method, target, temp);
@ -133,6 +134,9 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
__ ld(interp_only, temp);
__ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
// Null method test is replicated below in compiled case,
// it might be able to address across the verify_thread()
__ br_null_short(G5_method, Assembler::pn, L_no_such_method);
__ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
__ jmp(target, 0);
__ delayed()->nop();
@ -141,11 +145,19 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
// it doesn't matter, since this is interpreter code.
}
// Compiled case, either static or fall-through from runtime conditional
__ br_null_short(G5_method, Assembler::pn, L_no_such_method);
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ ld_ptr(G5_method, in_bytes(entry_offset), target);
__ jmp(target, 0);
__ delayed()->nop();
__ bind(L_no_such_method);
AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
__ jump_to(ame, temp);
__ delayed()->nop();
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -358,7 +358,7 @@ void NativeMovConstReg::set_data(intptr_t x) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
oop_addr = r->oop_addr();
*oop_addr = (oop)x;
*oop_addr = cast_to_oop(x);
} else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
}
@ -478,7 +478,7 @@ void NativeMovConstRegPatching::set_data(int x) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
oop_addr = r->oop_addr();
*oop_addr = (oop)x;
*oop_addr = cast_to_oop(x);
} else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
}

View File

@ -2018,6 +2018,15 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return L7_REGP_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return G1_REGI_mask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
%}
@ -4245,12 +4254,16 @@ operand cmpOp() %{
greater_equal(0xB);
less_equal(0x2);
greater(0xA);
overflow(0x7);
no_overflow(0xF);
%}
%}
// Comparison Op, unsigned
operand cmpOpU() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "u" %}
interface(COND_INTER) %{
@ -4260,12 +4273,16 @@ operand cmpOpU() %{
greater_equal(0xD);
less_equal(0x4);
greater(0xC);
overflow(0x7);
no_overflow(0xF);
%}
%}
// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "p" %}
interface(COND_INTER) %{
@ -4275,12 +4292,16 @@ operand cmpOpP() %{
greater_equal(0xD);
less_equal(0x4);
greater(0xC);
overflow(0x7);
no_overflow(0xF);
%}
%}
// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "" %}
interface(COND_INTER) %{
@ -4290,12 +4311,16 @@ operand cmpOp_reg() %{
greater_equal(0x7);
less_equal (0x2);
greater (0x6);
overflow(0x7); // not supported
no_overflow(0xF); // not supported
%}
%}
// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "fl" %}
interface(COND_INTER) %{
@ -4305,12 +4330,17 @@ operand cmpOpF() %{
greater_equal(0xB);
less_equal(0xE);
greater(0x6);
overflow(0x7); // not supported
no_overflow(0xF); // not supported
%}
%}
// Used by long compare
operand cmpOp_commute() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "" %}
interface(COND_INTER) %{
@ -4320,6 +4350,8 @@ operand cmpOp_commute() %{
greater_equal(0x2);
less_equal(0xB);
greater(0x3);
overflow(0x7);
no_overflow(0xF);
%}
%}

View File

@ -4769,7 +4769,7 @@ void Assembler::adcq(Register dst, Address src) {
}
void Assembler::adcq(Register dst, Register src) {
(int) prefixq_and_encode(dst->encoding(), src->encoding());
(void) prefixq_and_encode(dst->encoding(), src->encoding());
emit_arith(0x13, 0xC0, dst, src);
}
@ -4824,7 +4824,7 @@ void Assembler::andq(Register dst, Address src) {
}
void Assembler::andq(Register dst, Register src) {
(int) prefixq_and_encode(dst->encoding(), src->encoding());
(void) prefixq_and_encode(dst->encoding(), src->encoding());
emit_arith(0x23, 0xC0, dst, src);
}

View File

@ -639,7 +639,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
#ifdef CC_INTERP
obj = istate->_oop_temp;
#else
obj = (oop) at(interpreter_frame_oop_temp_offset);
obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
#endif // CC_INTERP
} else {
oop* obj_p = (oop*)tos_addr;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,6 +114,11 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry) {
assert(method == rbx, "interpreter calling convention");
Label L_no_such_method;
__ testptr(rbx, rbx);
__ jcc(Assembler::zero, L_no_such_method);
__ verify_method_ptr(method);
if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
@ -138,6 +143,9 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ jmp(Address(method, entry_offset));
__ bind(L_no_such_method);
__ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -475,7 +483,7 @@ void trace_method_handle_stub(const char* adaptername,
const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
adaptername, mh_reg_name,
mh, entry_sp);
(void *)mh, entry_sp);
if (Verbose) {
tty->print_cr("Registers:");

View File

@ -351,7 +351,7 @@ void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec,
int format) {
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.insts_mark(), rspec, format);
@ -1534,6 +1534,14 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return EAX_REG_mask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
// Returns true if the high 32 bits of the value is known to be zero.
bool is_operand_hi32_zero(Node* n) {
int opc = n->Opcode();
@ -4922,6 +4930,8 @@ operand cmpOp() %{
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4939,6 +4949,8 @@ operand cmpOpU() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4957,6 +4969,8 @@ operand cmpOpUCF() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4974,6 +4988,8 @@ operand cmpOpUCF2() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4981,6 +4997,8 @@ operand cmpOpUCF2() %{
operand cmpOp_fcmov() %{
match(Bool);
predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
n->as_Bool()->_test._test != BoolTest::no_overflow);
format %{ "" %}
interface(COND_INTER) %{
equal (0x0C8);
@ -4989,6 +5007,8 @@ operand cmpOp_fcmov() %{
greater_equal(0x1C0);
less_equal (0x0D0);
greater (0x1D0);
overflow(0x0, "o"); // not really supported by the instruction
no_overflow(0x1, "no"); // not really supported by the instruction
%}
%}
@ -5004,6 +5024,8 @@ operand cmpOp_commute() %{
greater_equal(0xE, "le");
less_equal(0xD, "ge");
greater(0xC, "l");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -7496,6 +7518,31 @@ instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "ADD $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
// Integer Addition Instructions
instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
match(Set dst (AddI dst src));

View File

@ -529,7 +529,7 @@ void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, in
if (rspec.reloc()->type() == relocInfo::oop_type &&
d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.insts_mark(), rspec, format);
@ -556,7 +556,7 @@ void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec
if (rspec.reloc()->type() == relocInfo::oop_type &&
d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
"cannot embed scavengable oops in code");
}
#endif
@ -1649,6 +1649,14 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask();
}
const RegMask Matcher::mathExactI_result_proj_mask() {
return INT_RAX_REG_mask();
}
const RegMask Matcher::mathExactI_flags_proj_mask() {
return INT_FLAGS_mask();
}
%}
//----------ENCODING BLOCK-----------------------------------------------------
@ -4133,6 +4141,8 @@ operand cmpOp()
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4151,6 +4161,8 @@ operand cmpOpU()
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4170,6 +4182,8 @@ operand cmpOpUCF() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -4187,6 +4201,8 @@ operand cmpOpUCF2() %{
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
overflow(0x0, "o");
no_overflow(0x1, "no");
%}
%}
@ -6922,6 +6938,30 @@ instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
%{
match(AddExactI dst src);
effect(DEF cr);
format %{ "addl $dst, $src\t# addExact int" %}
ins_encode %{
__ addl($dst$$Register, $src$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
match(Set dst (AddI dst src));

View File

@ -4839,6 +4839,10 @@ jint os::init_2(void)
Linux::capture_initial_stack(JavaThread::stack_size_at_create());
#if defined(IA32)
workaround_expand_exec_shield_cs_limit();
#endif
Linux::libpthread_init();
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",

View File

@ -876,3 +876,46 @@ void os::verify_stack_alignment() {
#endif
}
#endif
/*
* IA32 only: execute code at a high address in case buggy NX emulation is present. I.e. avoid CS limit
* updates (JDK-8023956).
*/
void os::workaround_expand_exec_shield_cs_limit() {
#if defined(IA32)
size_t page_size = os::vm_page_size();
/*
* Take the highest VA the OS will give us and exec
*
* Although using -(pagesz) as mmap hint works on newer kernel as you would
* think, older variants affected by this work-around don't (search forward only).
*
* On the affected distributions, we understand the memory layout to be:
*
* TASK_LIMIT= 3G, main stack base close to TASK_LIMT.
*
* A few pages south main stack will do it.
*
* If we are embedded in an app other than launcher (initial != main stack),
* we don't have much control or understanding of the address space, just let it slide.
*/
char* hint = (char*) (Linux::initial_thread_stack_bottom() -
((StackYellowPages + StackRedPages + 1) * page_size));
char* codebuf = os::reserve_memory(page_size, hint);
if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
return; // No matter, we tried, best effort.
}
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
}
// Some code to exec: the 'ret' instruction
codebuf[0] = 0xC3;
// Call the code in the codebuf
__asm__ volatile("call *%0" : : "r"(codebuf));
// keep the page mapped so CS limit isn't reduced.
#endif
}

View File

@ -36,4 +36,17 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
/*
* Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
* (IA32 only).
*
* Map and execute at a high VA to prevent CS lazy updates race with SMP MM
* invalidation. Further code generation by the JVM will no longer cause CS limit
* updates.
*
* Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
* @see JDK-8023956
*/
static void workaround_expand_exec_shield_cs_limit();
#endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_HPP

View File

@ -3395,12 +3395,16 @@ Interface *ADLParser::cond_interface_parse(void) {
char *greater_equal;
char *less_equal;
char *greater;
char *overflow;
char *no_overflow;
const char *equal_format = "eq";
const char *not_equal_format = "ne";
const char *less_format = "lt";
const char *greater_equal_format = "ge";
const char *less_equal_format = "le";
const char *greater_format = "gt";
const char *overflow_format = "o";
const char *no_overflow_format = "no";
if (_curchar != '%') {
parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
@ -3437,6 +3441,12 @@ Interface *ADLParser::cond_interface_parse(void) {
else if ( strcmp(field,"greater") == 0 ) {
greater = interface_field_parse(&greater_format);
}
else if ( strcmp(field,"overflow") == 0 ) {
overflow = interface_field_parse(&overflow_format);
}
else if ( strcmp(field,"no_overflow") == 0 ) {
no_overflow = interface_field_parse(&no_overflow_format);
}
else {
parse_err(SYNERR, "Expected keyword, base|index|scale|disp, or '%%}' ending interface.\n");
return NULL;
@ -3455,7 +3465,9 @@ Interface *ADLParser::cond_interface_parse(void) {
less, less_format,
greater_equal, greater_equal_format,
less_equal, less_equal_format,
greater, greater_format);
greater, greater_format,
overflow, overflow_format,
no_overflow, no_overflow_format);
return inter;
}

View File

@ -1192,6 +1192,8 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
|| strcmp(idealName,"CmpF") == 0
|| strcmp(idealName,"FastLock") == 0
|| strcmp(idealName,"FastUnlock") == 0
|| strcmp(idealName,"AddExactI") == 0
|| strcmp(idealName,"FlagsProj") == 0
|| strcmp(idealName,"Bool") == 0
|| strcmp(idealName,"Binary") == 0 ) {
// Removed ConI from the must_clone list. CPUs that cannot use

View File

@ -2757,14 +2757,18 @@ CondInterface::CondInterface(const char* equal, const char* equal_format
const char* less, const char* less_format,
const char* greater_equal, const char* greater_equal_format,
const char* less_equal, const char* less_equal_format,
const char* greater, const char* greater_format)
const char* greater, const char* greater_format,
const char* overflow, const char* overflow_format,
const char* no_overflow, const char* no_overflow_format)
: Interface("COND_INTER"),
_equal(equal), _equal_format(equal_format),
_not_equal(not_equal), _not_equal_format(not_equal_format),
_less(less), _less_format(less_format),
_greater_equal(greater_equal), _greater_equal_format(greater_equal_format),
_less_equal(less_equal), _less_equal_format(less_equal_format),
_greater(greater), _greater_format(greater_format) {
_greater(greater), _greater_format(greater_format),
_overflow(overflow), _overflow_format(overflow_format),
_no_overflow(no_overflow), _no_overflow_format(no_overflow_format) {
}
CondInterface::~CondInterface() {
// not owner of any character arrays
@ -2777,12 +2781,14 @@ void CondInterface::dump() {
// Write info to output files
void CondInterface::output(FILE *fp) {
Interface::output(fp);
if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal);
if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal);
if ( _less != NULL ) fprintf(fp," less == %s\n", _less);
if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal);
if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal);
if ( _greater != NULL ) fprintf(fp," greater == %s\n", _greater);
if ( _equal != NULL ) fprintf(fp," equal == %s\n", _equal);
if ( _not_equal != NULL ) fprintf(fp," not_equal == %s\n", _not_equal);
if ( _less != NULL ) fprintf(fp," less == %s\n", _less);
if ( _greater_equal != NULL ) fprintf(fp," greater_equal == %s\n", _greater_equal);
if ( _less_equal != NULL ) fprintf(fp," less_equal == %s\n", _less_equal);
if ( _greater != NULL ) fprintf(fp," greater == %s\n", _greater);
if ( _overflow != NULL ) fprintf(fp," overflow == %s\n", _overflow);
if ( _no_overflow != NULL ) fprintf(fp," no_overflow == %s\n", _no_overflow);
// fprintf(fp,"\n");
}

View File

@ -798,12 +798,16 @@ public:
const char *_greater_equal;
const char *_less_equal;
const char *_greater;
const char *_overflow;
const char *_no_overflow;
const char *_equal_format;
const char *_not_equal_format;
const char *_less_format;
const char *_greater_equal_format;
const char *_less_equal_format;
const char *_greater_format;
const char *_overflow_format;
const char *_no_overflow_format;
// Public Methods
CondInterface(const char* equal, const char* equal_format,
@ -811,7 +815,9 @@ public:
const char* less, const char* less_format,
const char* greater_equal, const char* greater_equal_format,
const char* less_equal, const char* less_equal_format,
const char* greater, const char* greater_format);
const char* greater, const char* greater_format,
const char* overflow, const char* overflow_format,
const char* no_overflow, const char* no_overflow_format);
~CondInterface();
void dump();

View File

@ -388,6 +388,8 @@ static void defineCCodeDump(OperandForm* oper, FILE *fp, int i) {
fprintf(fp, " else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
fprintf(fp, " else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
fprintf(fp, " else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
fprintf(fp, " else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format);
fprintf(fp, " else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format);
}
// Output code that dumps constant values, increment "i" if type is constant
@ -1208,6 +1210,8 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," case BoolTest::ne : return not_equal();\n");
fprintf(fp," case BoolTest::le : return less_equal();\n");
fprintf(fp," case BoolTest::ge : return greater_equal();\n");
fprintf(fp," case BoolTest::overflow : return overflow();\n");
fprintf(fp," case BoolTest::no_overflow: return no_overflow();\n");
fprintf(fp," default : ShouldNotReachHere(); return 0;\n");
fprintf(fp," }\n");
fprintf(fp," };\n");
@ -1373,6 +1377,14 @@ void ArchDesc::declareClasses(FILE *fp) {
if( greater != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "greater", greater);
}
const char *overflow = cInterface->_overflow;
if( overflow != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "overflow", overflow);
}
const char *no_overflow = cInterface->_no_overflow;
if( no_overflow != NULL ) {
define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow);
}
} // end Conditional Interface
// Check if it is a Constant Interface
else if (oper->_interface->is_ConstInterface() != NULL ) {

View File

@ -1019,7 +1019,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
n_copy->set_data((intx) (load_klass()));
} else {
assert(mirror() != NULL, "klass not set");
n_copy->set_data((intx) (mirror()));
n_copy->set_data(cast_from_oop<intx>(mirror()));
}
if (TracePatching) {
@ -1031,7 +1031,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
assert(n_copy->data() == 0 ||
n_copy->data() == (intptr_t)Universe::non_oop_word(),
"illegal init value");
n_copy->set_data((intx) (appendix()));
n_copy->set_data(cast_from_oop<intx>(appendix()));
if (TracePatching) {
Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
@ -1078,14 +1078,17 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// replace instructions
// first replace the tail, then the call
#ifdef ARM
if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
if((load_klass_or_mirror_patch_id ||
stub_id == Runtime1::load_appendix_patching_id) &&
!VM_Version::supports_movw()) {
nmethod* nm = CodeCache::find_nmethod(instr_pc);
address addr = NULL;
assert(nm != NULL, "invalid nmethod_pc");
RelocIterator mds(nm, copy_buff, copy_buff + 1);
while (mds.next()) {
if (mds.type() == relocInfo::oop_type) {
assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
assert(stub_id == Runtime1::load_mirror_patching_id ||
stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
oop_Relocation* r = mds.oop_reloc();
addr = (address)r->oop_addr();
break;

View File

@ -1787,7 +1787,7 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Hidden;
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature):
if (_location != _in_field) break; // only allow for fields
if (!privileged) break; // only allow in privileged code
return _field_Stable;
@ -2545,7 +2545,9 @@ Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
if (method->is_final()) {
*has_final_method = true;
}
if (is_interface && !method->is_abstract() && !method->is_static()) {
if (is_interface && !(*has_default_methods)
&& !method->is_abstract() && !method->is_static()
&& !method->is_private()) {
// default method
*has_default_methods = true;
}
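For context, the tightened check above now counts an interface method as a default method only when it is not abstract, not static, and not private. A minimal, hypothetical Java sketch of the distinction (illustrative only, not part of this changeset):

    // Hypothetical illustration: only m1 matches the new default-method test.
    interface Example {
        default int m1() { return 1; }  // concrete instance method: a default method
        static int m2() { return 2; }   // static: excluded by the check
        int m3();                       // abstract: excluded by the check
    }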

View File

@ -261,7 +261,7 @@ void ClassLoaderData::add_class(Klass* k) {
k,
k->external_name(),
k->class_loader_data(),
k->class_loader(),
(void *)k->class_loader(),
loader_name());
}
}
@ -297,7 +297,7 @@ void ClassLoaderData::unload() {
if (TraceClassLoaderData) {
ResourceMark rm;
tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(),
loader_name());
if (is_anonymous()) {
tty->print(" for anonymous class "PTR_FORMAT " ", _klasses);
@ -458,7 +458,7 @@ const char* ClassLoaderData::loader_name() {
void ClassLoaderData::dump(outputStream * const out) {
ResourceMark rm;
out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
this, class_loader(),
this, (void *)class_loader(),
class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
if (claimed()) out->print(" claimed ");
if (is_unloading()) out->print(" unloading ");
@ -553,7 +553,7 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRA
ResourceMark rm;
tty->print("[ClassLoaderData: ");
tty->print("create class loader data "PTR_FORMAT, cld);
tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(),
cld->loader_name());
tty->print_cr("]");
}

View File

@ -325,6 +325,7 @@ class MethodFamily : public ResourceObj {
Method* _selected_target; // Filled in later, if a unique target exists
Symbol* _exception_message; // If no unique target is found
Symbol* _exception_name; // If no unique target is found
bool contains_method(Method* method) {
int* lookup = _member_index.get(method);
@ -350,7 +351,7 @@ class MethodFamily : public ResourceObj {
public:
MethodFamily()
: _selected_target(NULL), _exception_message(NULL) {}
: _selected_target(NULL), _exception_message(NULL), _exception_name(NULL) {}
void set_target_if_empty(Method* m) {
if (_selected_target == NULL && !m->is_overpass()) {
@ -383,6 +384,7 @@ class MethodFamily : public ResourceObj {
Method* get_selected_target() { return _selected_target; }
Symbol* get_exception_message() { return _exception_message; }
Symbol* get_exception_name() { return _exception_name; }
// Either sets the target or the exception error message
void determine_target(InstanceKlass* root, TRAPS) {
@ -400,15 +402,18 @@ class MethodFamily : public ResourceObj {
if (qualified_methods.length() == 0) {
_exception_message = generate_no_defaults_message(CHECK);
_exception_name = vmSymbols::java_lang_AbstractMethodError();
} else if (qualified_methods.length() == 1) {
Method* method = qualified_methods.at(0);
if (method->is_abstract()) {
_exception_message = generate_abstract_method_message(method, CHECK);
_exception_name = vmSymbols::java_lang_AbstractMethodError();
} else {
_selected_target = qualified_methods.at(0);
}
} else {
_exception_message = generate_conflicts_message(&qualified_methods,CHECK);
_exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
}
assert((has_target() ^ throws_exception()) == 1,
@ -459,8 +464,9 @@ class MethodFamily : public ResourceObj {
void print_exception(outputStream* str, int indent) {
assert(throws_exception(), "Should be called otherwise");
assert(_exception_name != NULL, "exception_name should be set");
streamIndentor si(str, indent * 2);
str->indent().print_cr("%s", _exception_message->as_C_string());
str->indent().print_cr("%s: %s", _exception_name->as_C_string(), _exception_message->as_C_string());
}
#endif // ndef PRODUCT
};
@ -670,7 +676,10 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
InstanceKlass* iklass = current_class();
Method* m = iklass->find_method(_method_name, _method_signature);
if (m != NULL) {
// private interface methods are not candidates for default methods
// invokespecial to private interface methods doesn't use default method logic
// future: take access controls into account for superclass methods
if (m != NULL && (!iklass->is_interface() || m->is_public())) {
if (_family == NULL) {
_family = new StatefulMethodFamily();
}
@ -782,202 +791,9 @@ void DefaultMethods::generate_default_methods(
#endif // ndef PRODUCT
}
/**
* Interface inheritance rules were used to find a unique default method
* candidate for the resolved class. This
* method is only viable if it would also be in the set of default method
* candidates if we ran a full analysis on the current class.
*
* The only reason that the method would not be in the set of candidates for
* the current class is if that there's another matching method
* which is "more specific" than the found method -- i.e., one could find a
* path in the interface hierarchy in which the matching method appears
* before we get to '_target'.
*
* In order to determine this, we examine all of the implemented
* interfaces. If we find path that leads to the '_target' interface, then
* we examine that path to see if there are any methods that would shadow
* the selected method along that path.
*/
class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
protected:
Thread* THREAD;
InstanceKlass* _target;
Symbol* _method_name;
InstanceKlass* _method_holder;
bool _found_shadow;
public:
ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
InstanceKlass* target)
: THREAD(thread), _method_name(name), _method_holder(holder),
_target(target), _found_shadow(false) {}
void* new_node_data(InstanceKlass* cls) { return NULL; }
void free_node_data(void* data) { return; }
bool visit() {
InstanceKlass* ik = current_class();
if (ik == _target && current_depth() == 1) {
return false; // This was the specified super -- no need to search it
}
if (ik == _method_holder || ik == _target) {
// We found a path that should be examined to see if it shadows _method
if (path_has_shadow()) {
_found_shadow = true;
cancel_iteration();
}
return false; // no need to continue up hierarchy
}
return true;
}
virtual bool path_has_shadow() = 0;
bool found_shadow() { return _found_shadow; }
};
// Used for Invokespecial.
// Invokespecial is allowed to invoke a concrete interface method
// and can be used to disambuiguate among qualified candidates,
// which are methods in immediate superinterfaces,
// but may not be used to invoke a candidate that would be shadowed
// from the perspective of the caller.
// Invokespecial is also used in the overpass generation today
// We re-run the shadowchecker because we can't distinguish this case,
// but it should return the same answer, since the overpass target
// is now the invokespecial caller.
class ErasedShadowChecker : public ShadowChecker {
private:
bool path_has_shadow() {
for (int i = current_depth() - 1; i > 0; --i) {
InstanceKlass* ik = class_at_depth(i);
if (ik->is_interface()) {
int end;
int start = ik->find_method_by_name(_method_name, &end);
if (start != -1) {
return true;
}
}
}
return false;
}
public:
ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
InstanceKlass* target)
: ShadowChecker(thread, name, holder, target) {}
};
// Find the unique qualified candidate from the perspective of the super_class
// which is the resolved_klass, which must be an immediate superinterface
// of klass
Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
FindMethodsByErasedSig visitor(method_name, sig);
visitor.run(super_class); // find candidates from resolved_klass
MethodFamily* family;
visitor.get_discovered_family(&family);
if (family != NULL) {
family->determine_target(current_class, CHECK_NULL); // get target from current_class
if (family->has_target()) {
Method* target = family->get_selected_target();
InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
// Verify that the identified method is valid from the context of
// the current class, which is the caller class for invokespecial
// link resolution, i.e. ensure there it is not shadowed.
// You can use invokespecial to disambiguate interface methods, but
// you can not use it to skip over an interface method that would shadow it.
ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
checker.run(current_class);
if (checker.found_shadow()) {
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr(" Only candidate found was shadowed.");
}
#endif // ndef PRODUCT
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
"Accessible default method not found", NULL);
} else {
#ifndef PRODUCT
if (TraceDefaultMethods) {
family->print_sig_on(tty, target->signature(), 1);
}
#endif // ndef PRODUCT
return target;
}
} else {
assert(family->throws_exception(), "must have target or throw");
THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
family->get_exception_message()->as_C_string(), NULL);
}
} else {
// no method found
ResourceMark rm(THREAD);
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(current_class,
method_name, sig), NULL);
}
}
// This is called during linktime when we find an invokespecial call that
// refers to a direct superinterface. It indicates that we should find the
// default method in the hierarchy of that superinterface, and if that method
// would have been a candidate from the point of view of 'this' class, then we
// return that method.
// This logic assumes that the super is a direct superclass of the caller
Method* DefaultMethods::find_super_default(
Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
ResourceMark rm(THREAD);
assert(cls != NULL && super != NULL, "Need real classes");
InstanceKlass* current_class = InstanceKlass::cast(cls);
InstanceKlass* super_class = InstanceKlass::cast(super);
// Keep entire hierarchy alive for the duration of the computation
KeepAliveRegistrar keepAlive(THREAD);
KeepAliveVisitor loadKeepAlive(&keepAlive);
loadKeepAlive.run(current_class); // get hierarchy from current class
#ifndef PRODUCT
if (TraceDefaultMethods) {
tty->print_cr("Finding super default method %s.%s%s from %s",
super_class->name()->as_C_string(),
method_name->as_C_string(), sig->as_C_string(),
current_class->name()->as_C_string());
}
#endif // ndef PRODUCT
assert(super_class->is_interface(), "only call for default methods");
Method* target = NULL;
target = find_erased_super_default(current_class, super_class,
method_name, sig, CHECK_NULL);
#ifndef PRODUCT
if (target != NULL) {
if (TraceDefaultMethods) {
tty->print(" Returning ");
print_method(tty, target, true);
tty->print_cr("");
}
}
#endif // ndef PRODUCT
return target;
}
#ifndef PRODUCT
#ifdef ASSERT
// Return true is broad type is a covariant return of narrow type
static bool covariant_return_type(BasicType narrow, BasicType broad) {
if (narrow == broad) {
@ -988,7 +804,7 @@ static bool covariant_return_type(BasicType narrow, BasicType broad) {
}
return false;
}
#endif // ndef PRODUCT
#endif
static int assemble_redirect(
BytecodeConstantPool* cp, BytecodeBuffer* buffer,
@ -1035,10 +851,9 @@ static int assemble_redirect(
return parameter_count;
}
static int assemble_abstract_method_error(
BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
static int assemble_method_error(
BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
Symbol* init = vmSymbols::object_initializer_name();
Symbol* sig = vmSymbols::string_void_signature();
@ -1150,8 +965,7 @@ static void create_overpasses(
&bpool, &buffer, slot->signature(), selected, CHECK);
}
} else if (method->throws_exception()) {
max_stack = assemble_abstract_method_error(
&bpool, &buffer, method->get_exception_message(), CHECK);
max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK);
}
if (max_stack != 0) {
AccessFlags flags = accessFlags_from(
@ -1281,4 +1095,3 @@ static void merge_in_new_methods(InstanceKlass* klass,
MetadataFactory::free_array(cld, original_ordering);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,15 +44,5 @@ class DefaultMethods : AllStatic {
// the class.
static void generate_default_methods(
InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
// Called during linking when an invokespecial to an direct interface
// method is found. Selects and returns a method if there is a unique
// default method in the 'super_iface' part of the hierarchy which is
// also a candidate default for 'this_klass'. Otherwise throws an AME.
static Method* find_super_default(
Klass* this_klass, Klass* super_iface,
Symbol* method_name, Symbol* method_sig, TRAPS);
};
#endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -264,7 +264,7 @@ class SymbolPropertyEntry : public HashtableEntry<Symbol*, mtSymbol> {
}
if (method_type() != NULL) {
if (printed) st->print(" and ");
st->print(INTPTR_FORMAT, method_type());
st->print(INTPTR_FORMAT, (void *)method_type());
printed = true;
}
st->print_cr(printed ? "" : "(empty)");

View File

@ -840,7 +840,7 @@ StringTable::VerifyRetTypes StringTable::compare_entries(
if (str1 == str2) {
tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
"in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
str1, bkt1, e_cnt1, bkt2, e_cnt2);
(void *)str1, bkt1, e_cnt1, bkt2, e_cnt2);
return _verify_fail_continue;
}

View File

@ -270,7 +270,7 @@
template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
template(java_lang_invoke_DontInline_signature, "Ljava/lang/invoke/DontInline;") \
template(sun_invoke_Stable_signature, "Lsun/invoke/Stable;") \
template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \
template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \
template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \
@ -631,6 +631,10 @@
do_name(log_name,"log") do_name(log10_name,"log10") do_name(pow_name,"pow") \
do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \
\
do_name(addExact_name,"addExact") \
do_name(subtractExact_name,"subtractExact") \
do_name(multiplyExact_name,"multiplyExact") \
\
do_intrinsic(_dabs, java_lang_Math, abs_name, double_double_signature, F_S) \
do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \
do_intrinsic(_dcos, java_lang_Math, cos_name, double_double_signature, F_S) \
@ -643,6 +647,7 @@
do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \
do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \
do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \
do_intrinsic(_addExact, java_lang_Math, addExact_name, int2_int_signature, F_S) \
\
do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \
do_name( floatToRawIntBits_name, "floatToRawIntBits") \

View File

@ -124,7 +124,6 @@ int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
@ -464,96 +463,11 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
}
#endif //PRODUCT
/**
* Remove and return nmethod from the saved code list in order to reanimate it.
*/
nmethod* CodeCache::reanimate_saved_code(Method* m) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved->is_in_use() && saved->method() == m) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
saved->set_speculatively_disconnected(false);
saved->set_saved_nmethod_link(NULL);
if (PrintMethodFlushing) {
saved->print_on(tty, " ### nmethod is reconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
xtty->method(m);
xtty->stamp();
xtty->end_elem();
}
return saved;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
return NULL;
}
/**
* Remove nmethod from the saved code list in order to discard it permanently
*/
void CodeCache::remove_saved_code(nmethod* nm) {
// For conc swpr this will be called with CodeCache_lock taken by caller
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
while (saved != NULL) {
if (saved == nm) {
if (prev != NULL) {
prev->set_saved_nmethod_link(saved->saved_nmethod_link());
} else {
_saved_nmethods = saved->saved_nmethod_link();
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
xtty->stamp();
xtty->end_elem();
}
return;
}
prev = saved;
saved = saved->saved_nmethod_link();
}
ShouldNotReachHere();
}
void CodeCache::speculatively_disconnect(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
nm->set_saved_nmethod_link(_saved_nmethods);
_saved_nmethods = nm;
if (PrintMethodFlushing) {
nm->print_on(tty, " ### nmethod is speculatively disconnected");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
xtty->method(nm->method());
xtty->stamp();
xtty->end_elem();
}
nm->method()->clear_code();
nm->set_speculatively_disconnected(true);
}
void CodeCache::gc_prologue() {
assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALIVE_BLOBS(cb) {

View File

@ -57,7 +57,6 @@ class CodeCache : AllStatic {
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static nmethod* _saved_nmethods; // Linked list of speculatively disconnected nmethods.
static void verify_if_often() PRODUCT_RETURN;
@ -167,17 +166,12 @@ class CodeCache : AllStatic {
static size_t capacity() { return _heap->capacity(); }
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static double reverse_free_ratio();
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches
static nmethod* reanimate_saved_code(Method* m);
static void remove_saved_code(nmethod* nm);
static void speculatively_disconnect(nmethod* nm);
// Deoptimization
static int mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP

View File

@ -462,7 +462,6 @@ void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
@ -481,7 +480,6 @@ void nmethod::init_defaults() {
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
#ifdef HAVE_DTRACE_H
@ -686,6 +684,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@ -770,6 +769,7 @@ nmethod::nmethod(
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
code_buffer->copy_values_to(this);
debug_only(verify_scavenge_root_oops());
@ -842,6 +842,7 @@ nmethod::nmethod(
_comp_level = comp_level;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
_hotness_counter = NMethodSweeper::hotness_counter_reset_val();
// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
@ -1176,7 +1177,7 @@ void nmethod::cleanup_inline_caches() {
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method");
assert(is_alive(), "Must be an alive method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count());
@ -1261,7 +1262,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
set_osr_link(NULL);
//set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
NMethodSweeper::notify(this);
NMethodSweeper::notify();
}
void nmethod::invalidate_osr_method() {
@ -1351,6 +1352,15 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
nmethod_needs_unregister = true;
}
// Must happen before state change. Otherwise we have a race condition in
// nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
// transition its state from 'not_entrant' to 'zombie' without having to wait
// for stack scanning.
if (state == not_entrant) {
mark_as_seen_on_stack();
OrderAccess::storestore();
}
// Change state
_state = state;
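The ordering hazard described in the comment above can be seen in a few lines of standalone C++. This is only a sketch of the idea: std::atomic and a release fence stand in for HotSpot's OrderAccess::storestore(), and FakeNMethod/State are invented names rather than VM types.

// Sketch: publish "seen on stack" before the state word changes, so a sweeper
// that observes the new state also observes the traversal mark.
#include <atomic>
#include <cstdio>

enum State { alive = 0, not_entrant = 1, zombie = 2 };   // hypothetical values

struct FakeNMethod {
    std::atomic<long> stack_traversal_mark{0};
    std::atomic<int>  state{alive};

    void make_not_entrant(long current_traversal) {
        // Analogue of mark_as_seen_on_stack(): must become visible first.
        stack_traversal_mark.store(current_traversal, std::memory_order_relaxed);
        // Analogue of OrderAccess::storestore(): no later store may be
        // reordered ahead of the mark above.
        std::atomic_thread_fence(std::memory_order_release);
        state.store(not_entrant, std::memory_order_relaxed);
    }
};

int main() {
    FakeNMethod nm;
    nm.make_not_entrant(42);
    std::printf("state=%d mark=%ld\n", nm.state.load(), nm.stack_traversal_mark.load());
    return 0;
}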
@ -1369,11 +1379,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
HandleMark hm;
method()->clear_code();
}
if (state == not_entrant) {
mark_as_seen_on_stack();
}
} // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the
@ -1416,7 +1421,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
}
// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this);
NMethodSweeper::notify();
return true;
}
@ -1451,10 +1456,6 @@ void nmethod::flush() {
CodeCache::drop_scavenge_root_nmethod(this);
}
if (is_speculatively_disconnected()) {
CodeCache::remove_saved_code(this);
}
#ifdef SHARK
((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK
@ -1965,7 +1966,7 @@ public:
if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");
tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
_print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
(intptr_t)(*p), (intptr_t)p);
(void *)(*p), (intptr_t)p);
(*p)->print();
}
#endif //PRODUCT
@ -2345,7 +2346,7 @@ public:
_ok = false;
}
tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
(intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
(void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
}
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
@ -2466,7 +2467,7 @@ public:
_ok = false;
}
tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
(intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
(void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
(*p)->print();
}
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }

View File

@ -119,7 +119,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
nmethod* _saved_nmethod_link; // from CodeCache::speculatively_disconnect
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
@ -165,7 +164,6 @@ class nmethod : public CodeBlob {
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
bool _speculatively_disconnected; // Marked for potential unload
bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization
@ -180,7 +178,7 @@ class nmethod : public CodeBlob {
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
@ -202,11 +200,18 @@ class nmethod : public CodeBlob {
// not_entrant method removal. Each mark_sweep pass will update
// this mark to current sweep invocation count if it is seen on the
// stack. An not_entrant method can be removed when there is no
// stack. An not_entrant method can be removed when there are no
// more activations, i.e., when the _stack_traversal_mark is less than
// current sweep traversal index.
long _stack_traversal_mark;
// The _hotness_counter indicates the hotness of a method. The higher
// the value the hotter the method. The hotness counter of a nmethod is
// set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
// is active while stack scanning (mark_active_nmethods()). The hotness
// counter is decreased (by 1) while sweeping.
int _hotness_counter;
ExceptionCache *_exception_cache;
PcDescCache _pc_desc_cache;
@ -382,6 +387,10 @@ class nmethod : public CodeBlob {
int total_size () const;
void dec_hotness_counter() { _hotness_counter--; }
void set_hotness_counter(int val) { _hotness_counter = val; }
int hotness_counter() const { return _hotness_counter; }
// Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
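As a rough, self-contained illustration of the hotness bookkeeping described in the comment above (not VM code): the reset value follows the quoted formula, with ReservedCodeCacheSize assumed to be 48 MB purely for the example, and each sweep in which the method is not seen active decrementing the counter by one.

#include <cstdio>
#include <cstddef>

// Assumed value for illustration only; the real flag is configured by the VM.
static const size_t ReservedCodeCacheSize = 48 * 1024 * 1024;

static int hotness_counter_reset_val() {
    // Matches the formula in the comment: [(ReservedCodeCacheSize / (1024 * 1024)) * 2]
    return (int)(ReservedCodeCacheSize / (1024 * 1024)) * 2;
}

int main() {
    int hotness = hotness_counter_reset_val();   // method observed during stack scanning
    std::printf("reset value: %d\n", hotness);   // 96 for a 48 MB code cache
    for (int sweep = 0; sweep < 5; sweep++) {
        hotness--;                               // dec_hotness_counter() per sweep
    }
    std::printf("after 5 sweeps: %d\n", hotness);
    return 0;
}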
@ -408,8 +417,8 @@ class nmethod : public CodeBlob {
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
@ -437,9 +446,6 @@ class nmethod : public CodeBlob {
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
@ -499,9 +505,6 @@ public:
nmethod* scavenge_root_link() const { return _scavenge_root_link; }
void set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }
nmethod* saved_nmethod_link() const { return _saved_nmethod_link; }
void set_saved_nmethod_link(nmethod *n) { _saved_nmethod_link = n; }
public:
// Sweeper support

View File

@ -634,19 +634,36 @@ CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
// Wait for an available CompileTask.
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter
// case we perform code cache sweeps to free memory such that we can re-enable
// compilation.
while (_first == NULL) {
// There is no work to be done right now. Wait.
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
// During the emergency sweeping periods, wake up and sweep occasionally
bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
if (timedout) {
if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
// transition a method's state from 'not_entrant' to 'zombie'.
long wait_time = NmethodSweepCheckInterval * 1000;
if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
// Only one thread at a time can do sweeping. Scale the
// wait time according to the number of compiler threads.
// As a result, the next sweep is likely to happen every 100ms
// with an arbitrary number of threads that do sweeping.
wait_time = 100 * CICompilerCount;
}
bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
if (timeout) {
MutexUnlocker ul(lock());
// When otherwise not busy, run nmethod sweeping
NMethodSweeper::possibly_sweep();
}
} else {
// During normal operation no need to wake up on timer
// If there are no compilation tasks and we can compile new jobs
// (i.e., there is enough free space in the code cache) there is
// no need to invoke the sweeper. As a result, the hotness of methods
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
lock()->wait();
}
}
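The wait-time arithmetic above, taken out of context as a sketch: NmethodSweepCheckInterval and CICompilerCount are VM flags, and the concrete values below are assumptions for illustration only.

#include <cstdio>

int main() {
    // Assumed flag values, for illustration only.
    long NmethodSweepCheckInterval = 5;      // seconds
    int  CICompilerCount           = 2;      // number of compiler threads
    bool interval_is_default       = true;   // stands in for FLAG_IS_DEFAULT(...)

    long wait_time_ms = NmethodSweepCheckInterval * 1000;
    if (interval_is_default) {
        // Each of the CICompilerCount threads waits 100 ms * CICompilerCount,
        // so with the threads taking turns one of them wakes up roughly every
        // 100 ms, independent of how many threads participate in sweeping.
        wait_time_ms = 100 * CICompilerCount;
    }
    std::printf("each compiler thread waits %ld ms before trying to sweep\n",
                wait_time_ms);
    return 0;
}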
@ -1227,16 +1244,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
return method_code;
}
}
if (method->is_not_compilable(comp_level)) return NULL;
if (UseCodeCacheFlushing) {
nmethod* saved = CodeCache::reanimate_saved_code(method());
if (saved != NULL) {
method->set_code(method, saved);
return saved;
}
if (method->is_not_compilable(comp_level)) {
return NULL;
}
} else {
// osr compilation
#ifndef TIERED
@ -1585,9 +1595,6 @@ void CompileBroker::compiler_thread_loop() {
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
} else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
// Attempt to start cleaning the code cache while there is still a little headroom
NMethodSweeper::handle_full_code_cache(false);
}
CompileTask* task = queue->get();
@ -1943,7 +1950,11 @@ void CompileBroker::handle_full_code_cache() {
}
#endif
if (UseCodeCacheFlushing) {
NMethodSweeper::handle_full_code_cache(true);
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");
NMethodSweeper::possibly_sweep();
}
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
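Taken together with the queue-side sweeping above, the full-code-cache path amounts to a small toggle: stop accepting compile jobs, sweep to reclaim space, and re-enable compilation once there is headroom again. The following toy model only sketches that cycle; ToyBroker, its fields, and its thresholds are invented for the example and are not the broker API.

#include <cstdio>

struct ToyBroker {
    bool compile_new_jobs = true;
    int  free_space_kb    = 0;

    void possibly_sweep() {
        free_space_kb += 512;                 // pretend one sweep reclaims some space
        if (!compile_new_jobs && free_space_kb >= 1024) {
            compile_new_jobs = true;          // re-enable once there is headroom
        }
    }
    void handle_full_code_cache() {
        if (compile_new_jobs) {
            compile_new_jobs = false;         // stop new compiles immediately
            possibly_sweep();                 // and start reclaiming space
        }
    }
};

int main() {
    ToyBroker broker;
    broker.handle_full_code_cache();
    std::printf("compiling=%d free=%dKB\n", broker.compile_new_jobs, broker.free_space_kb);
    broker.possibly_sweep();
    std::printf("compiling=%d free=%dKB\n", broker.compile_new_jobs, broker.free_space_kb);
    return 0;
}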

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -628,7 +628,7 @@ void DerivedPointerTable::clear() {
// Returns value of location as an int
intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {

View File

@ -9065,7 +9065,7 @@ bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
return !stack->isEmpty();
}
#define BUSY (oop(0x1aff1aff))
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
@ -9098,7 +9098,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
return false;
}
// Grab the entire list; we'll put back a suffix
oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
Thread* tid = Thread::current();
// Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
// set to ParallelGCThreads.
@ -9113,7 +9113,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
return false;
} else if (_overflow_list != BUSY) {
// Try and grab the prefix
prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
}
}
// If the list was found to be empty, or we spun long

View File

@ -2694,7 +2694,7 @@ public:
if (print_it) {
_out->print_cr(" "PTR_FORMAT"%s",
o, (over_tams) ? " >" : (marked) ? " M" : "");
(void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
PrintReachableOopClosure oopCl(_out, _vo, _all);
o->oop_iterate_no_header(&oopCl);
}

View File

@ -81,7 +81,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm) {
G1CollectedHeap* g1h = _g1h;
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
CardTableModRefBS* ct_bs = g1h->g1_barrier_set();
HeapWord* start = mr.start();
HeapWord* end = mr.end();

View File

@ -65,9 +65,7 @@ void G1CardCounts::initialize() {
// threshold limit is no more than this.
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
ModRefBarrierSet* bs = _g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
_ct_bs = (CardTableModRefBS*)bs;
_ct_bs = _g1h->g1_barrier_set();
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
// Allocate/Reserve the counts table

View File

@ -125,10 +125,8 @@ class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
int _histo[256];
public:
ClearLoggedCardTableEntryClosure() :
_calls(0)
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
{
_g1h = G1CollectedHeap::heap();
_ctbs = (CardTableModRefBS*)_g1h->barrier_set();
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
bool do_card_ptr(jbyte* card_ptr, int worker_i) {
@ -158,11 +156,8 @@ class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
CardTableModRefBS* _ctbs;
public:
RedirtyLoggedCardTableEntryClosure() :
_calls(0)
{
_g1h = G1CollectedHeap::heap();
_ctbs = (CardTableModRefBS*)_g1h->barrier_set();
}
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
bool do_card_ptr(jbyte* card_ptr, int worker_i) {
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
_calls++;
@ -478,7 +473,7 @@ bool G1CollectedHeap::is_scavengable(const void* p) {
void G1CollectedHeap::check_ct_logs_at_safepoint() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
CardTableModRefBS* ct_bs = g1_barrier_set();
// Count the dirty cards at the start.
CountNonCleanMemRegionClosure count1(this);
@ -1205,7 +1200,7 @@ public:
};
void G1CollectedHeap::clear_rsets_post_compaction() {
PostMCRemSetClearClosure rs_clear(this, mr_bs());
PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
heap_region_iterate(&rs_clear);
}
@ -1777,7 +1772,6 @@ void G1CollectedHeap::update_committed_space(HeapWord* old_end,
}
bool G1CollectedHeap::expand(size_t expand_bytes) {
size_t old_mem_size = _g1_storage.committed_size();
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_size_up(aligned_expand_bytes,
HeapRegion::GrainBytes);
@ -1787,6 +1781,13 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
ergo_format_byte("attempted expansion amount"),
expand_bytes, aligned_expand_bytes);
if (_g1_storage.uncommitted_size() == 0) {
ergo_verbose0(ErgoHeapSizing,
"did not expand the heap",
ergo_format_reason("heap already fully expanded"));
return false;
}
// First commit the memory.
HeapWord* old_end = (HeapWord*) _g1_storage.high();
bool successful = _g1_storage.expand_by(aligned_expand_bytes);
@ -1845,7 +1846,6 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
}
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t old_mem_size = _g1_storage.committed_size();
size_t aligned_shrink_bytes =
ReservedSpace::page_align_size_down(shrink_bytes);
aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
@ -2045,20 +2045,13 @@ jint G1CollectedHeap::initialize() {
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(_reserved, 2);
set_barrier_set(rem_set()->bs());
if (barrier_set()->is_a(BarrierSet::ModRef)) {
_mr_bs = (ModRefBarrierSet*)_barrier_set;
} else {
vm_exit_during_initialization("G1 requires a mod ref bs.");
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
return JNI_ENOMEM;
}
// Also create a G1 rem set.
if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
_g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
} else {
vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
return JNI_ENOMEM;
}
_g1_rem_set = new G1RemSet(this, g1_barrier_set());
// Carve out the G1 part of the heap.
@ -3681,6 +3674,11 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
// Fill TLAB's and such
ensure_parsability(true);
if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
}
}
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
@ -3689,7 +3687,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
(G1SummarizeRSetStatsPeriod > 0) &&
// we are at the end of the GC. Total collections has already been increased.
((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_periodic_summary_info();
g1_rem_set()->print_periodic_summary_info("After GC RS summary");
}
// FIXME: what is this about?
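The prologue check uses total_collections() % period == 0 while the epilogue uses (total_collections() - 1) % period == 0 because, as the epilogue comment notes, the counter has already been incremented by the time the epilogue runs. Assuming that ordering, this small sketch (the period and counter values are arbitrary) shows the two conditions select the same pauses.

#include <cstdio>

int main() {
    const unsigned period = 3;                 // stands in for G1SummarizeRSetStatsPeriod
    unsigned total_collections = 0;

    for (int gc = 0; gc < 4; gc++) {
        // gc_prologue(): counter not yet bumped for this pause.
        bool before = (total_collections % period == 0);
        total_collections++;                   // the GC increments the counter
        // gc_epilogue(): counter already includes this pause, hence the -1.
        bool after = ((total_collections - 1) % period == 0);
        std::printf("pause %d: before=%d after=%d\n", gc, before, after);
        // before and after always agree, so a periodic pause gets both the
        // "Before GC" and "After GC" RS summaries.
    }
    return 0;
}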
@ -4550,7 +4548,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
: _g1h(g1h),
_refs(g1h->task_queue(queue_num)),
_dcq(&g1h->dirty_card_queue_set()),
_ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
_ct_bs(g1h->g1_barrier_set()),
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
@ -4617,7 +4615,7 @@ bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
oop p = oopDesc::load_decode_heap_oop(ref);
assert(_g1h->is_in_g1_reserved(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
return true;
}
@ -4627,11 +4625,11 @@ bool G1ParScanThreadState::verify_ref(oop* ref) const {
// Must be in the collection set--it's already been copied.
oop p = clear_partial_array_mask(ref);
assert(_g1h->obj_in_cs(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
} else {
oop p = oopDesc::load_decode_heap_oop(ref);
assert(_g1h->is_in_g1_reserved(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
}
return true;
}
@ -5979,11 +5977,11 @@ void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
}
class G1ParCleanupCTTask : public AbstractGangTask {
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
G1CollectedHeap* _g1h;
HeapRegion* volatile _su_head;
public:
G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
G1CollectedHeap* g1h) :
AbstractGangTask("G1 Par Cleanup CT Task"),
_ct_bs(ct_bs), _g1h(g1h) { }
@ -6006,9 +6004,9 @@ public:
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
public:
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
: _g1h(g1h), _ct_bs(ct_bs) { }
virtual bool doHeapRegion(HeapRegion* r) {
if (r->is_survivor()) {
@ -6022,7 +6020,7 @@ public:
void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->end());
ct_bs->verify_not_dirty_region(mr);
}
@ -6035,13 +6033,13 @@ void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
ct_bs->verify_dirty_region(mr);
}
void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
verify_dirty_region(hr);
}
@ -6053,7 +6051,7 @@ void G1CollectedHeap::verify_dirty_young_regions() {
#endif
void G1CollectedHeap::cleanUpCardTable() {
CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
double start = os::elapsedTime();
{

View File

@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
@ -703,7 +704,7 @@ public:
if (_g1_committed.contains((HeapWord*) obj)) {
// no need to subtract the bottom of the heap from obj,
// _in_cset_fast_test is biased
uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
bool ret = _in_cset_fast_test[index];
// let's make sure the result is consistent with what the slower
// test returns
@ -791,8 +792,6 @@ protected:
// The g1 remembered set of the heap.
G1RemSet* _g1_rem_set;
// And it's mod ref barrier set, used to track updates for the above.
ModRefBarrierSet* _mr_bs;
// A set of cards that cover the objects for which the Rsets should be updated
// concurrently after the collection.
@ -1127,7 +1126,6 @@ public:
// The rem set and barrier set.
G1RemSet* g1_rem_set() const { return _g1_rem_set; }
ModRefBarrierSet* mr_bs() const { return _mr_bs; }
unsigned get_gc_time_stamp() {
return _gc_time_stamp;
@ -1346,6 +1344,10 @@ public:
virtual bool is_in_closed_subset(const void* p) const;
G1SATBCardTableModRefBS* g1_barrier_set() {
return (G1SATBCardTableModRefBS*) barrier_set();
}
// This resets the card table to all zeros. It is used after
// a collection pause which used the card table to claim cards.
void cleanUpCardTable();
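The g1_barrier_set() accessor above is the pattern this commit applies across the G1 files: the cast from the generic barrier set to G1SATBCardTableModRefBS lives in one place instead of being repeated at every call site. A minimal sketch with stub classes standing in for the real hierarchy:

#include <cstdio>

// Stub stand-ins for the real barrier-set hierarchy; bodies are toy code.
struct BarrierSet              { virtual ~BarrierSet() {} };
struct CardTableModRefBS       : BarrierSet        { void dirty() { std::printf("dirty card\n"); } };
struct G1SATBCardTableModRefBS : CardTableModRefBS { void g1_mark_as_young() { std::printf("mark young\n"); } };

struct ToyHeap {
    BarrierSet* _barrier_set = new G1SATBCardTableModRefBS();
    BarrierSet* barrier_set() { return _barrier_set; }

    // The cast lives in one accessor; callers get the derived type directly.
    G1SATBCardTableModRefBS* g1_barrier_set() {
        return static_cast<G1SATBCardTableModRefBS*>(barrier_set());
    }
};

int main() {
    ToyHeap heap;
    heap.g1_barrier_set()->dirty();
    heap.g1_barrier_set()->g1_mark_as_young();
    delete heap._barrier_set;
    return 0;
}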
@ -1875,7 +1877,7 @@ protected:
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocBufferContainer _surviving_alloc_buffer;
@ -1914,7 +1916,7 @@ protected:
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
CardTableModRefBS* ctbs() { return _ct_bs; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
if (!from->is_survivor()) {

View File

@ -134,7 +134,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
g1_barrier_set()->dirty(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {

View File

@ -41,11 +41,11 @@ class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
G1CollectedHeap* _g1;
DirtyCardQueue *_dcq;
CardTableModRefBS* _ct_bs;
G1SATBCardTableModRefBS* _ct_bs;
public:
UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
_g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
_g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }

View File

@ -220,7 +220,7 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
public:
G1PrepareCompactClosure(CompactibleSpace* cs)
: _g1h(G1CollectedHeap::heap()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_mrbs(_g1h->g1_barrier_set()),
_cp(NULL, cs, cs->initialize_threshold()),
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,12 +91,12 @@ template <class T> inline bool has_partial_array_mask(T* ref) {
}
template <class T> inline T* set_partial_array_mask(T obj) {
assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
template <class T> inline oop clear_partial_array_mask(T* ref) {
return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
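set_partial_array_mask and clear_partial_array_mask rely on spare low bits in aligned addresses; the hunk above only changes how the oop is converted to an integer (through a void* step and cast_to_oop). A standalone sketch of the tag-bit mechanics, using plain pointers and an assumed mask value rather than the G1 definitions:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Assumed tag bit; the real G1_PARTIAL_ARRAY_MASK is defined elsewhere in G1.
static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

static void* set_partial_array_mask(void* obj) {
    assert((reinterpret_cast<uintptr_t>(obj) & PARTIAL_ARRAY_MASK) == 0 && "Information loss!");
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) | PARTIAL_ARRAY_MASK);
}

static void* clear_partial_array_mask(void* ref) {
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ref) & ~PARTIAL_ARRAY_MASK);
}

int main() {
    alignas(8) static int big_array[4];          // aligned, so the low bits are free
    void* tagged = set_partial_array_mask(big_array);
    std::printf("tagged   %p\n", tagged);
    std::printf("restored %p (same as %p)\n", clear_partial_array_mask(tagged), (void*)big_array);
    return 0;
}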

View File

@ -83,7 +83,9 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
_prev_period_summary.initialize(this, n_workers());
if (G1SummarizeRSetStats) {
_prev_period_summary.initialize(this);
}
}
G1RemSet::~G1RemSet() {
@ -109,7 +111,7 @@ class ScanRSClosure : public HeapRegionClosure {
CodeBlobToOopClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
CardTableModRefBS *_ct_bs;
G1SATBCardTableModRefBS *_ct_bs;
double _strong_code_root_scan_time_sec;
int _worker_i;
@ -130,7 +132,7 @@ public:
{
_g1h = G1CollectedHeap::heap();
_bot_shared = _g1h->bot_shared();
_ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
_ct_bs = _g1h->g1_barrier_set();
_block_size = MAX2<int>(G1RSetScanBlockSize, 1);
}
@ -505,12 +507,7 @@ public:
ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
_g1h(G1CollectedHeap::heap()),
_region_bm(region_bm), _card_bm(card_bm),
_ctbs(NULL)
{
ModRefBarrierSet* bs = _g1h->mr_bs();
guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
_ctbs = (CardTableModRefBS*)bs;
}
_ctbs(_g1h->g1_barrier_set()) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
@ -731,19 +728,19 @@ bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
return has_refs_into_cset;
}
void G1RemSet::print_periodic_summary_info() {
void G1RemSet::print_periodic_summary_info(const char* header) {
G1RemSetSummary current;
current.initialize(this, n_workers());
current.initialize(this);
_prev_period_summary.subtract_from(&current);
print_summary_info(&_prev_period_summary);
print_summary_info(&_prev_period_summary, header);
_prev_period_summary.set(&current);
}
void G1RemSet::print_summary_info() {
G1RemSetSummary current;
current.initialize(this, n_workers());
current.initialize(this);
print_summary_info(&current, " Cumulative RS summary");
}

View File

@ -145,7 +145,7 @@ public:
virtual void print_summary_info();
// Print accumulated summary info from the last time called.
virtual void print_periodic_summary_info();
virtual void print_periodic_summary_info(const char* header);
// Prepare remembered set for verification.
virtual void prepare_for_verify();

View File

@ -77,12 +77,12 @@ double G1RemSetSummary::rs_thread_vtime(uint thread) const {
return _rs_threads_vtimes[thread];
}
void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
void G1RemSetSummary::initialize(G1RemSet* remset) {
assert(_rs_threads_vtimes == NULL, "just checking");
assert(remset != NULL, "just checking");
_remset = remset;
_num_vtimes = num_workers;
_num_vtimes = ConcurrentG1Refine::thread_num();
_rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
@ -125,25 +125,115 @@ void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
_sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
}
class HRRSStatsIter: public HeapRegionClosure {
size_t _occupied;
static double percent_of(size_t numerator, size_t denominator) {
if (denominator != 0) {
return (double)numerator / denominator * 100.0f;
} else {
return 0.0f;
}
}
static size_t round_to_K(size_t value) {
return value / K;
}
class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
private:
const char* _name;
size_t _rs_mem_size;
size_t _cards_occupied;
size_t _amount;
size_t _code_root_mem_size;
size_t _code_root_elems;
double rs_mem_size_percent_of(size_t total) {
return percent_of(_rs_mem_size, total);
}
double cards_occupied_percent_of(size_t total) {
return percent_of(_cards_occupied, total);
}
double code_root_mem_size_percent_of(size_t total) {
return percent_of(_code_root_mem_size, total);
}
double code_root_elems_percent_of(size_t total) {
return percent_of(_code_root_elems, total);
}
size_t amount() const { return _amount; }
public:
RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0),
_amount(0), _code_root_mem_size(0), _code_root_elems(0) { }
void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size,
size_t code_root_elems) {
_rs_mem_size += rs_mem_size;
_cards_occupied += cards_occupied;
_code_root_mem_size += code_root_mem_size;
_code_root_elems += code_root_elems;
_amount++;
}
size_t rs_mem_size() const { return _rs_mem_size; }
size_t cards_occupied() const { return _cards_occupied; }
size_t code_root_mem_size() const { return _code_root_mem_size; }
size_t code_root_elems() const { return _code_root_elems; }
void print_rs_mem_info_on(outputStream * out, size_t total) {
out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
}
void print_cards_occupied_info_on(outputStream * out, size_t total) {
out->print_cr(" %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
}
void print_code_root_mem_info_on(outputStream * out, size_t total) {
out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
}
void print_code_root_elems_info_on(outputStream * out, size_t total) {
out->print_cr(" %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
}
};
class HRRSStatsIter: public HeapRegionClosure {
private:
RegionTypeCounter _young;
RegionTypeCounter _humonguous;
RegionTypeCounter _free;
RegionTypeCounter _old;
RegionTypeCounter _all;
size_t _total_rs_mem_sz;
size_t _max_rs_mem_sz;
HeapRegion* _max_rs_mem_sz_region;
size_t _total_code_root_mem_sz;
size_t total_rs_mem_sz() const { return _all.rs_mem_size(); }
size_t total_cards_occupied() const { return _all.cards_occupied(); }
size_t max_rs_mem_sz() const { return _max_rs_mem_sz; }
HeapRegion* max_rs_mem_sz_region() const { return _max_rs_mem_sz_region; }
size_t _max_code_root_mem_sz;
HeapRegion* _max_code_root_mem_sz_region;
size_t total_code_root_mem_sz() const { return _all.code_root_mem_size(); }
size_t total_code_root_elems() const { return _all.code_root_elems(); }
size_t max_code_root_mem_sz() const { return _max_code_root_mem_sz; }
HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
public:
HRRSStatsIter() :
_occupied(0),
_total_rs_mem_sz(0),
_max_rs_mem_sz(0),
_max_rs_mem_sz_region(NULL),
_total_code_root_mem_sz(0),
_max_code_root_mem_sz(0),
_max_code_root_mem_sz_region(NULL)
HRRSStatsIter() : _all("All"), _young("Young"), _humonguous("Humonguous"),
_free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
_max_rs_mem_sz(0), _max_code_root_mem_sz(0)
{}
bool doHeapRegion(HeapRegion* r) {
@ -156,46 +246,95 @@ public:
_max_rs_mem_sz = rs_mem_sz;
_max_rs_mem_sz_region = r;
}
_total_rs_mem_sz += rs_mem_sz;
size_t occupied_cards = hrrs->occupied();
size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
if (code_root_mem_sz > _max_code_root_mem_sz) {
_max_code_root_mem_sz = code_root_mem_sz;
if (code_root_mem_sz > max_code_root_mem_sz()) {
_max_code_root_mem_sz_region = r;
}
_total_code_root_mem_sz += code_root_mem_sz;
size_t code_root_elems = hrrs->strong_code_roots_list_length();
RegionTypeCounter* current = NULL;
if (r->is_young()) {
current = &_young;
} else if (r->isHumongous()) {
current = &_humonguous;
} else if (r->is_empty()) {
current = &_free;
} else {
current = &_old;
}
current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
_all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
size_t occ = hrrs->occupied();
_occupied += occ;
return false;
}
size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
size_t occupied() { return _occupied; }
void print_summary_on(outputStream* out) {
RegionTypeCounter* counters[] = { &_young, &_humonguous, &_free, &_old, NULL };
out->print_cr("\n Current rem set statistics");
out->print_cr(" Total per region rem sets sizes = "SIZE_FORMAT"K."
" Max = "SIZE_FORMAT"K.",
round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
(*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
}
out->print_cr(" Static structures = "SIZE_FORMAT"K,"
" free_lists = "SIZE_FORMAT"K.",
round_to_K(HeapRegionRemSet::static_mem_size()),
round_to_K(HeapRegionRemSet::fl_mem_size()));
out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
total_cards_occupied());
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
(*current)->print_cards_occupied_info_on(out, total_cards_occupied());
}
// Largest sized rem set region statistics
HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
out->print_cr(" Region with largest rem set = "HR_FORMAT", "
"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
round_to_K(rem_set->mem_size()),
round_to_K(rem_set->occupied()));
// Strong code root statistics
HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
out->print_cr(" Total heap region code root sets sizes = "SIZE_FORMAT"K."
" Max = "SIZE_FORMAT"K.",
round_to_K(total_code_root_mem_sz()),
round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
(*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
}
out->print_cr(" "SIZE_FORMAT" code roots represented.",
total_code_root_elems());
for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
(*current)->print_code_root_elems_info_on(out, total_code_root_elems());
}
out->print_cr(" Region with largest amount of code roots = "HR_FORMAT", "
"size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
round_to_K(max_code_root_rem_set->strong_code_roots_list_length()));
}
};
double calc_percentage(size_t numerator, size_t denominator) {
if (denominator != 0) {
return (double)numerator / denominator * 100.0;
} else {
return 0.0f;
}
}
void G1RemSetSummary::print_on(outputStream* out) {
out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
out->print_cr("\n Recent concurrent refinement statistics");
out->print_cr(" Processed "SIZE_FORMAT" cards",
num_concurrent_refined_cards());
out->print_cr(" Of %d completed buffers:", num_processed_buf_total());
out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.",
num_processed_buf_total(),
calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
out->print_cr(" %8d (%5.1f%%) by mutator threads.",
num_processed_buf_mutator(),
calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
out->print_cr(" Did %d coarsenings.", num_coarsenings());
out->print_cr(" Concurrent RS threads times (s)");
out->print(" ");
for (uint i = 0; i < _num_vtimes; i++) {
@ -207,33 +346,5 @@ void G1RemSetSummary::print_on(outputStream* out) {
HRRSStatsIter blk;
G1CollectedHeap::heap()->heap_region_iterate(&blk);
// RemSet stats
out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
" Max = "SIZE_FORMAT"K.",
blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
out->print_cr(" Static structures = "SIZE_FORMAT"K,"
" free_lists = "SIZE_FORMAT"K.",
HeapRegionRemSet::static_mem_size() / K,
HeapRegionRemSet::fl_mem_size() / K);
out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
blk.occupied());
HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
out->print_cr(" Max size region = "HR_FORMAT", "
"size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
HR_FORMAT_PARAMS(max_rs_mem_sz_region),
(max_rs_rem_set->mem_size() + K - 1)/K,
(max_rs_rem_set->occupied() + K - 1)/K);
out->print_cr(" Did %d coarsenings.", num_coarsenings());
// Strong code root stats
out->print_cr(" Total heap region code-root set sizes = "SIZE_FORMAT"K."
" Max = "SIZE_FORMAT"K.",
blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
out->print_cr(" Max size region = "HR_FORMAT", "
"size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
(max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
(max_code_root_rem_set->strong_code_roots_list_length()));
blk.print_summary_on(out);
}

View File

@ -84,7 +84,7 @@ public:
void subtract_from(G1RemSetSummary* other);
// initialize and get the first sampling
void initialize(G1RemSet* remset, uint num_workers);
void initialize(G1RemSet* remset);
void print_on(outputStream* out);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,27 @@ G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
}
}
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
// It's already processed
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)deferred_card_val();
} else {
if (val & claimed_card_val()) {
new_val = val | (jbyte)deferred_card_val();
}
}
if (new_val != val) {
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
}
return true;
}
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions) :
@ -95,7 +116,7 @@ void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
oop new_val) {
uintptr_t field_uint = (uintptr_t)field;
uintptr_t new_val_uint = (uintptr_t)new_val;
uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
uintptr_t comb = field_uint ^ new_val_uint;
comb = comb >> HeapRegion::LogOfHRGrainBytes;
if (comb == 0) return;

View File

@ -89,6 +89,42 @@ public:
write_ref_array_pre_work(dst, count);
}
}
/*
Claimed and deferred bits are used together in G1 during the evacuation
pause. These bits can have the following state transitions:
1. The claimed bit can be put over any other card state. Except that
the "dirty -> dirty and claimed" transition is checked for in
G1 code and is not used.
2. Deferred bit can be set only if the previous state of the card
was either clean or claimed. mark_card_deferred() is wait-free.
We do not care whether the operation is successful because, if it is not,
it will only result in a duplicate entry in the update buffer because of
the "cache-miss". So it's not worth spinning.
*/
bool is_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
bool mark_card_deferred(size_t card_index);
bool is_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
};
// Adds card-table logging to the post-barrier.
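The claimed/deferred transitions described in the comment above can be traced on a single byte. The sketch below uses assumed card values (the real constants belong to CardTableModRefBS and differ from these) and a std::atomic compare-exchange in place of Atomic::cmpxchg:

#include <atomic>
#include <cstdio>

// Assumed card values, chosen only so the checks can be traced by hand.
static const signed char clean_card    = -1;    // all bits set
static const signed char clean_mask    = 0x70;  // bits that only a clean card has
static const signed char claimed_card  = 0x02;
static const signed char deferred_card = 0x04;

static std::atomic<signed char> card{clean_card};

// Mirrors mark_card_deferred(): set the deferred bit if the card is clean or
// claimed; losing the CAS race is tolerated, it only costs a duplicate entry.
static bool mark_card_deferred() {
    signed char val = card.load();
    if ((val & (clean_mask | deferred_card)) == deferred_card) {
        return false;                            // already deferred
    }
    signed char new_val = val;
    if (val == clean_card) {
        new_val = deferred_card;
    } else if (val & claimed_card) {
        new_val = (signed char)(val | deferred_card);
    }
    if (new_val != val) {
        card.compare_exchange_strong(val, new_val);
    }
    return true;
}

int main() {
    std::printf("first call:  %s\n", mark_card_deferred() ? "deferred" : "skipped");
    std::printf("second call: %s\n", mark_card_deferred() ? "deferred" : "skipped");
    return 0;
}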

View File

@ -637,7 +637,7 @@ class VerifyStrongCodeRootOopClosure: public OopClosure {
gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
"["PTR_FORMAT", "PTR_FORMAT") is above "
"top "PTR_FORMAT,
obj, _hr->bottom(), _hr->end(), _hr->top());
(void *)obj, _hr->bottom(), _hr->end(), _hr->top());
_failures = true;
return;
}
@ -951,12 +951,12 @@ void HeapRegion::verify(VerifyOption vo,
Klass* klass = obj->klass();
if (!klass->is_metaspace_object()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not metadata", klass, obj);
"not metadata", klass, (void *)obj);
*failures = true;
return;
} else if (!klass->is_klass()) {
gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
"not a klass", klass, obj);
"not a klass", klass, (void *)obj);
*failures = true;
return;
} else {
@ -971,7 +971,7 @@ void HeapRegion::verify(VerifyOption vo,
}
}
} else {
gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
gclog_or_tty->print_cr(PTR_FORMAT" no an oop", (void *)obj);
*failures = true;
return;
}

View File

@ -91,8 +91,8 @@ protected:
gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
from,
UseCompressedOops
? oopDesc::load_decode_heap_oop((narrowOop*)from)
: oopDesc::load_decode_heap_oop((oop*)from));
? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
: (void *)oopDesc::load_decode_heap_oop((oop*)from));
}
HeapRegion* loc_hr = hr();
@ -403,8 +403,8 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
from,
UseCompressedOops
? oopDesc::load_decode_heap_oop((narrowOop*)from)
: oopDesc::load_decode_heap_oop((oop*)from));
? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
: (void *)oopDesc::load_decode_heap_oop((oop*)from));
}
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

View File

@ -1103,7 +1103,7 @@ void ParNewGeneration::waste_some_time() {
}
}
static const oop ClaimedForwardPtr = oop(0x4);
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
@ -1226,7 +1226,7 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
}
#endif
@ -1347,7 +1347,7 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
}
#endif
@ -1436,7 +1436,7 @@ bool ParNewGeneration::should_simulate_overflow() {
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
assert(is_in_reserved(from_space_obj), "Should be from this generation");
if (ParGCUseLocalOverflow) {
@ -1512,7 +1512,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
if (_overflow_list == NULL) return false;
// Otherwise, there was something there; try claiming the list.
oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
// Trim off a prefix of at most objsFromOverflow items
Thread* tid = Thread::current();
size_t spin_count = (size_t)ParallelGCThreads;
@ -1526,7 +1526,7 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
return false;
} else if (_overflow_list != BUSY) {
// try and grab the prefix
prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
}
}
if (prefix == NULL || prefix == BUSY) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ inline void ParScanClosure::do_oop_work(T* p,
Space* sp = gch->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");
tty->print_cr("Object: " PTR_FORMAT, obj);
tty->print_cr("Object: " PTR_FORMAT, (void *)obj);
tty->print_cr("-------");
obj->print();
tty->print_cr("-----");
@ -110,7 +110,7 @@ inline void ParScanClosure::do_oop_work(T* p,
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarded ",
new_obj->klass()->internal_name(), p, obj, new_obj, new_obj->size());
new_obj->klass()->internal_name(), p, (void *)obj, (void *)new_obj, new_obj->size());
}
#endif

View File

@ -40,10 +40,8 @@ class GenerationSizer : public TwoGenerationCollectorPolicy {
void initialize_flags() {
// Do basic sizing work
this->TwoGenerationCollectorPolicy::initialize_flags();
TwoGenerationCollectorPolicy::initialize_flags();
// If the user hasn't explicitly set the number of worker
// threads, set the count.
assert(UseSerialGC ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
(ParallelGCThreads > 0),

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"

View File

@ -53,7 +53,6 @@
// Forward decls
class elapsedTimer;
class GenerationSizer;
class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
friend class PSGCAdaptivePolicyCounters;

View File

@ -26,7 +26,6 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"

View File

@ -27,7 +27,6 @@
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"

View File

@ -333,7 +333,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
"promotion-failure",
obj->klass()->internal_name(),
obj, obj->size());
(void *)obj, obj->size());
}
#endif

View File

@ -126,7 +126,7 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
oop* mask_chunked_array_oop(oop obj) {
assert(!is_oop_masked((oop*) obj), "invariant");
oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
assert(is_oop_masked(ret), "invariant");
return ret;
}

View File

@ -225,7 +225,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
}
#endif

View File

@ -27,7 +27,6 @@
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,7 +81,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
if (TraceScavenge && o->is_forwarded()) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarding",
new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
}
#endif

View File

@ -215,7 +215,7 @@ void print_oop(oop value, outputStream* st) {
st->print_cr(" %s", buf);
}
} else {
st->print_cr(" " PTR_FORMAT, (intptr_t) value);
st->print_cr(" " PTR_FORMAT, (void *)value);
}
}

View File

@ -1,5 +1,4 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -573,6 +572,16 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
}
if (check_access) {
// JDK8 adds non-public interface methods, and an accessibility check is required
assert(current_klass.not_null(), "current_klass should not be null");
// check if method can be accessed by the referring class
check_method_accessability(current_klass,
resolved_klass,
KlassHandle(THREAD, resolved_method->method_holder()),
resolved_method,
CHECK);
HandleMark hm(THREAD);
Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
@ -604,6 +613,20 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
}
}
}
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
tty->cr();
}
}
//------------------------------------------------------------------------------------------------------------------------
@ -795,26 +818,12 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
Symbol* method_name, Symbol* method_signature,
KlassHandle current_klass, bool check_access, TRAPS) {
if (resolved_klass->is_interface() && current_klass() != NULL) {
// If the target class is a direct interface, treat this as a "super"
// default call.
//
// If the current method is an overpass that happens to call a direct
// super-interface's method, then we'll end up rerunning the default method
// analysis even though we don't need to, but that's ok since it will end
// up with the same answer.
InstanceKlass* ik = InstanceKlass::cast(current_klass());
Array<Klass*>* interfaces = ik->local_interfaces();
int num_interfaces = interfaces->length();
for (int index = 0; index < num_interfaces; index++) {
if (interfaces->at(index) == resolved_klass()) {
Method* method = DefaultMethods::find_super_default(current_klass(),
resolved_klass(), method_name, method_signature, CHECK);
resolved_method = methodHandle(THREAD, method);
return;
}
}
}
// Invokespecial is called for multiple special reasons:
// <init>
// local private method invocation, for classes and interfaces
// superclass.method, which can also resolve to a default method
// and the selected method is recalculated relative to the direct superclass
// superinterface.method, which explicitly does not check shadowing
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
@ -844,6 +853,26 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
resolved_method->signature()));
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->method_holder()->is_interface() &&
!resolved_method->is_abstract()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
}
// throws runtime exceptions
@ -851,23 +880,24 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
KlassHandle current_klass, bool check_access, TRAPS) {
// resolved method is selected method unless we have an old-style lookup
// for a superclass method
// Invokespecial for a superinterface, resolved method is selected method,
// no checks for shadowing
methodHandle sel_method(THREAD, resolved_method());
// check if this is an old-style super call and do a new lookup if so
{ KlassHandle method_klass = KlassHandle(THREAD,
resolved_method->method_holder());
const bool direct_calling_default_method =
resolved_klass() != NULL && resolved_method() != NULL &&
resolved_klass->is_interface() && !resolved_method->is_abstract();
if (!direct_calling_default_method &&
check_access &&
if (check_access &&
// a) check if ACC_SUPER flag is set for the current class
(current_klass->is_super() || !AllowNonVirtualCalls) &&
// b) check if the method class is a superclass of the current class (superclass relation is not reflexive!)
current_klass->is_subtype_of(method_klass()) &&
current_klass() != method_klass() &&
// b) check if the class of the resolved_klass is a superclass
// (not supertype in order to exclude interface classes) of the current class.
// This check is not performed for super.invoke for interface methods
// in super interfaces.
current_klass->is_subclass_of(resolved_klass()) &&
current_klass() != resolved_klass() &&
// c) check if the method is not <init>
resolved_method->name() != vmSymbols::object_initializer_name()) {
// Lookup super method
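Editorial aside: the rewritten guard above switches from is_subtype_of(method_klass()) to is_subclass_of(resolved_klass()), and the new comment stresses that the superclass check deliberately excludes interfaces. The sketch below models just that distinction with hypothetical types; it is not HotSpot's Klass API.

#include <cassert>
#include <vector>

struct K {
  const K* super = nullptr;            // single superclass
  std::vector<const K*> interfaces;    // directly implemented interfaces

  bool is_subclass_of(const K* k) const {        // class chain only
    for (const K* c = this; c != nullptr; c = c->super)
      if (c == k) return true;
    return false;
  }
  bool is_subtype_of(const K* k) const {         // classes and interfaces
    if (is_subclass_of(k)) return true;
    for (const K* c = this; c != nullptr; c = c->super)
      for (const K* i : c->interfaces)
        if (i == k || i->is_subtype_of(k)) return true;
    return false;
  }
};

int main() {
  K object_k, iface, sub;
  sub.super = &object_k;
  sub.interfaces.push_back(&iface);
  assert(sub.is_subtype_of(&iface) && !sub.is_subclass_of(&iface));
  return 0;
}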
@ -905,6 +935,23 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result, methodHandle
sel_method->signature()));
}
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokespecial selected method: resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
sel_method->name(),
sel_method->signature()),
sel_method->method_holder()->internal_name()
);
sel_method->access_flags().print_on(tty);
if (sel_method->method_holder()->is_interface() &&
!sel_method->is_abstract()) {
tty->print("default");
}
tty->cr();
}
// setup result
result.set_static(resolved_klass, sel_method, CHECK);
}
@ -927,6 +974,18 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
// check if private interface method
if (resolved_klass->is_interface() && resolved_method->is_private()) {
ResourceMark rm(THREAD);
char buf[200];
jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokevirtual: method %s, caller-class:%s",
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()));
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
// check if not static
if (resolved_method->is_static()) {
ResourceMark rm(THREAD);
@ -936,6 +995,27 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
resolved_method->signature()));
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
(current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
resolved_method->method_holder()->internal_name()
);
resolved_method->access_flags().print_on(tty);
if (resolved_method->method_holder()->is_interface() &&
!resolved_method->is_abstract()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
}
// throws runtime exceptions
@ -1012,6 +1092,27 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
selected_method->signature()));
}
if (PrintVtables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokevirtual selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, vtable_index:%d, access_flags: ",
(recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
selected_method->method_holder()->internal_name(),
vtable_index
);
selected_method->access_flags().print_on(tty);
if (selected_method->method_holder()->is_interface() &&
!selected_method->is_abstract()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
// setup result
result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK);
}
@ -1042,6 +1143,17 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
THROW(vmSymbols::java_lang_NullPointerException());
}
// check if private interface method
if (resolved_klass->is_interface() && resolved_method->is_private()) {
ResourceMark rm(THREAD);
char buf[200];
jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokeinterface: method %s",
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()));
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}
// check if receiver klass implements the resolved interface
if (!recv_klass->is_subtype_of(resolved_klass())) {
ResourceMark rm(THREAD);
@ -1071,28 +1183,15 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
resolved_method->signature()));
}
// check access
if (sel_method->method_holder()->is_interface()) {
// Method holder is an interface. Throw Illegal Access Error if sel_method
// is neither public nor private.
if (!(sel_method->is_public() || sel_method->is_private())) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
Method::name_and_sig_as_C_string(recv_klass(),
sel_method->name(),
sel_method->signature()));
}
}
else {
// Method holder is a class. Throw Illegal Access Error if sel_method
// is not public.
if (!sel_method->is_public()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
Method::name_and_sig_as_C_string(recv_klass(),
sel_method->name(),
sel_method->signature()));
}
// Throw Illegal Access Error if sel_method is not public.
if (!sel_method->is_public()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
Method::name_and_sig_as_C_string(recv_klass(),
sel_method->name(),
sel_method->signature()));
}
// check if abstract
if (check_null_and_abstract && sel_method->is_abstract()) {
ResourceMark rm(THREAD);
@ -1109,6 +1208,27 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
return;
}
int itable_index = resolved_method()->itable_index();
if (TraceItables && Verbose) {
ResourceMark rm(THREAD);
tty->print("invokeinterface selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
(recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
(resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
Method::name_and_sig_as_C_string(resolved_klass(),
resolved_method->name(),
resolved_method->signature()),
sel_method->method_holder()->internal_name()
);
sel_method->access_flags().print_on(tty);
if (sel_method->method_holder()->is_interface() &&
!sel_method->is_abstract()) {
tty->print("default");
}
if (resolved_method->is_overpass()) {
tty->print("overpass");
}
tty->cr();
}
result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
}
@ -1384,7 +1504,7 @@ void LinkResolver::resolve_dynamic_call(CallInfo& result,
THREAD);
if (HAS_PENDING_EXCEPTION) {
if (TraceMethodHandles) {
tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION);
tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, (void *)PENDING_EXCEPTION);
PENDING_EXCEPTION->print();
}
if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {

View File

@ -423,60 +423,6 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
inline_write_ref_field(field, newVal);
}
/*
Claimed and deferred bits are used together in G1 during the evacuation
pause. These bits can have the following state transitions:
1. The claimed bit can be put over any other card state. Except that
the "dirty -> dirty and claimed" transition is checked for in
G1 code and is not used.
2. Deferred bit can be set only if the previous state of the card
was either clean or claimed. mark_card_deferred() is wait-free.
We do not care whether the operation succeeds, because if it does not,
the only consequence is a duplicate entry in the update buffer due to
the "cache-miss". So it's not worth spinning.
*/
bool CardTableModRefBS::claim_card(size_t card_index) {
jbyte val = _byte_map[card_index];
assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
while (val == clean_card_val() ||
(val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
jbyte new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)claimed_card_val();
} else {
new_val = val | (jbyte)claimed_card_val();
}
jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
if (res == val) {
return true;
}
val = res;
}
return false;
}
bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
// It's already processed
if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
return false;
}
// Cached bit can be installed either on a clean card or on a claimed card.
jbyte new_val = val;
if (val == clean_card_val()) {
new_val = (jbyte)deferred_card_val();
} else {
if (val & claimed_card_val()) {
new_val = val | (jbyte)deferred_card_val();
}
}
if (new_val != val) {
Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
}
return true;
}
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
MemRegion mr,
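Editorial aside: the deleted claim_card()/mark_card_deferred() above follow a classic compare-and-swap claim protocol. For readers less familiar with the pattern, here is a self-contained sketch using std::atomic instead of HotSpot's Atomic::cmpxchg; the card encodings and G1-specific policy (e.g. never claiming a dirty card) are deliberately omitted and the values below are illustrative only.

#include <atomic>
#include <cassert>
#include <cstdint>

static const uint8_t CLEAN   = 0;   // illustrative encodings, not HotSpot's
static const uint8_t CLAIMED = 1;

// Returns true for exactly one successful claimer of an unclaimed entry.
static bool claim(std::atomic<uint8_t>& entry) {
  uint8_t val = entry.load(std::memory_order_relaxed);
  while ((val & CLAIMED) == 0) {
    uint8_t new_val = uint8_t(val | CLAIMED);
    if (entry.compare_exchange_weak(val, new_val, std::memory_order_relaxed))
      return true;                   // we installed the claimed bit
    // on failure, 'val' was refreshed with the current value; retry
  }
  return false;                      // someone else claimed it first
}

int main() {
  std::atomic<uint8_t> card(CLEAN);
  assert(claim(card));
  assert(!claim(card));
  return 0;
}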

View File

@ -339,34 +339,10 @@ public:
_byte_map[card_index] = dirty_card_val();
}
bool is_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
}
void set_card_claimed(size_t card_index) {
jbyte val = _byte_map[card_index];
if (val == clean_card_val()) {
val = (jbyte)claimed_card_val();
} else {
val |= (jbyte)claimed_card_val();
}
_byte_map[card_index] = val;
}
bool claim_card(size_t card_index);
bool is_card_clean(size_t card_index) {
return _byte_map[card_index] == clean_card_val();
}
bool is_card_deferred(size_t card_index) {
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
bool mark_card_deferred(size_t card_index);
// Card marking array base (adjusted for heap low boundary)
// This would be the 0th element of _byte_map, if the heap started at 0x0.
// But since the heap starts at some higher address, this points to somewhere

View File

@ -64,19 +64,21 @@ void CollectorPolicy::initialize_flags() {
vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
}
if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
restricted_align_down(MaxMetaspaceSize, max_alignment()));
}
// Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
// clobber the record of whether MaxMetaspaceSize was set on the command line or not.
// This information is needed later to conform to the specification of the
// java.lang.management.MemoryUsage API.
//
// Ideally, we would be able to set the default value of MaxMetaspaceSize in
// globals.hpp to the aligned value, but this is not possible, since the
// alignment depends on other flags being parsed.
MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
if (MetaspaceSize > MaxMetaspaceSize) {
FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
MetaspaceSize = MaxMetaspaceSize;
}
if (!is_size_aligned(MetaspaceSize, min_alignment())) {
FLAG_SET_ERGO(uintx, MetaspaceSize,
restricted_align_down(MetaspaceSize, min_alignment()));
}
MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
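Editorial aside: the block above replaces FLAG_SET_ERGO with direct assignments of the aligned-down values. The align-down operation itself is simple power-of-two arithmetic; the sketch below is a generic version with made-up numbers, not HotSpot's restricted_align_down helper (which may apply additional restrictions).

#include <cassert>
#include <cstddef>

// Round 'size' down to a multiple of 'alignment' (a power of two).
static inline size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t max_alignment = 64 * 1024;                  // made-up value
  const size_t requested     = 100 * 1024 * 1024 + 123;    // not aligned
  assert(align_down(requested, max_alignment) == 100 * 1024 * 1024);
  return 0;
}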
@ -135,15 +137,8 @@ bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
int max_covered_regions) {
switch (rem_set_name()) {
case GenRemSet::CardTable: {
CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
return res;
}
default:
guarantee(false, "unrecognized GenRemSet::Name");
return NULL;
}
assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
return new CardTableRS(whole_heap, max_covered_regions);
}
void CollectorPolicy::cleared_all_soft_refs() {

View File

@ -32,13 +32,8 @@
// enumeration.)
uintx GenRemSet::max_alignment_constraint(Name nm) {
switch (nm) {
case GenRemSet::CardTable:
return CardTableRS::ct_max_alignment_constraint();
default:
guarantee(false, "Unrecognized GenRemSet type.");
return (0); // Make Windows compiler happy
}
assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
return CardTableRS::ct_max_alignment_constraint();
}
class HasAccumulatedModifiedOopsClosure : public KlassClosure {

View File

@ -150,11 +150,11 @@ public:
HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD)
static int count(oop x) {
return (HeapWordSize * ((x) ? (x)->size() : 0));
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
static int count_array(objArrayOop x) {
return (HeapWordSize * ((x) ? (x)->size() : 0));
return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
}
template <class T> static int count(T* x) {

View File

@ -3104,7 +3104,7 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
// Also, don't use class_vsm() unless UseCompressedClassPointers is true.
if (mdtype == ClassType && using_class_space()) {
if (is_class_space_allocation(mdtype)) {
return class_vsm()->allocate(word_size);
} else {
return vsm()->allocate(word_size);
@ -3252,8 +3252,8 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
MetaspaceAux::dump(gclog_or_tty);
}
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
"Metadata space";
const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
"Metadata space";
report_java_out_of_memory(space_string);
if (JvmtiExport::should_post_resource_exhausted()) {
@ -3261,7 +3261,7 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
space_string);
}
if (mdtype == ClassType) {
if (is_class_space_allocation(mdtype)) {
THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
} else {
THROW_OOP_0(Universe::out_of_memory_error_metaspace());

View File

@ -235,6 +235,9 @@ class Metaspace : public CHeapObj<mtClass> {
return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
}
static bool is_class_space_allocation(MetadataType mdType) {
return mdType == ClassType && using_class_space();
}
};
class MetaspaceAux : AllStatic {

View File

@ -367,7 +367,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
next_d = java_lang_ref_Reference::discovered(obj);
if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
obj, next_d);
(void *)obj, (void *)next_d);
}
assert(java_lang_ref_Reference::next(obj) == NULL,
"Reference not active; should not be discovered");
@ -392,7 +392,7 @@ void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
next_d = java_lang_ref_Reference::discovered(obj);
if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
obj, next_d);
(void *)obj, (void *)next_d);
}
assert(java_lang_ref_Reference::next(obj) == NULL,
"The reference should not be enqueued");
@ -562,7 +562,7 @@ ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
!policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
iter.obj(), iter.obj()->klass()->internal_name());
(void *)iter.obj(), iter.obj()->klass()->internal_name());
}
// Remove Reference object from list
iter.remove();
@ -601,7 +601,7 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
if (iter.is_referent_alive()) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
iter.obj(), iter.obj()->klass()->internal_name());
(void *)iter.obj(), iter.obj()->klass()->internal_name());
}
// The referent is reachable after all.
// Remove Reference object from list.
@ -687,7 +687,7 @@ ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
clear_referent ? "cleared " : "",
iter.obj(), iter.obj()->klass()->internal_name());
(void *)iter.obj(), iter.obj()->klass()->internal_name());
}
assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
iter.next();
@ -1003,7 +1003,7 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list)
gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
INTPTR_FORMAT " with next field: " INTPTR_FORMAT
" and referent: " INTPTR_FORMAT,
iter.obj(), next, iter.referent());
(void *)iter.obj(), (void *)next, (void *)iter.referent());
}
)
// Remove Reference object from list
@ -1103,14 +1103,14 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
obj, obj->klass()->internal_name());
(void *)obj, obj->klass()->internal_name());
}
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->klass()->internal_name());
(void *)obj, obj->klass()->internal_name());
}
}
}
@ -1125,7 +1125,7 @@ void ReferenceProcessor::verify_referent(oop obj) {
assert(da ? referent->is_oop() : referent->is_oop_or_null(),
err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
INTPTR_FORMAT " during %satomic discovery ",
(intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
(void *)referent, (void *)obj, da ? "" : "non-"));
}
#endif
@ -1205,7 +1205,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
// The reference has already been discovered...
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->klass()->internal_name());
(void *)obj, obj->klass()->internal_name());
}
if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
// assumes that an object is not processed twice;
@ -1273,7 +1273,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->klass()->internal_name());
(void *)obj, obj->klass()->internal_name());
}
}
assert(obj->is_oop(), "Discovered a bad reference");
@ -1372,7 +1372,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
// active; we need to trace and mark its cohort.
if (TraceReferenceGC) {
gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
iter.obj(), iter.obj()->klass()->internal_name());
(void *)iter.obj(), iter.obj()->klass()->internal_name());
}
// Remove Reference object from list
iter.remove();

View File

@ -1918,7 +1918,7 @@ void ConstantPool::print_on(outputStream* st) const {
st->print_cr(" - holder: " INTPTR_FORMAT, pool_holder());
}
st->print_cr(" - cache: " INTPTR_FORMAT, cache());
st->print_cr(" - resolved_references: " INTPTR_FORMAT, resolved_references());
st->print_cr(" - resolved_references: " INTPTR_FORMAT, (void *)resolved_references());
st->print_cr(" - reference_map: " INTPTR_FORMAT, reference_map());
for (int index = 1; index < length(); index++) { // Index 0 is unused

View File

@ -306,8 +306,8 @@ void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
if (TraceInvokeDynamic) {
tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
invoke_code,
(intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
(intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
(void *)appendix(), (has_appendix ? "" : " (unused)"),
(void *)method_type(), (has_method_type ? "" : " (unused)"),
(intptr_t)adapter());
adapter->print();
if (has_appendix) appendix()->print();

View File

@ -106,7 +106,7 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
data, len, (clss)->class_loader(), thread_type); \
data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type); \
}
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
@ -119,7 +119,7 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
len = name->utf8_length(); \
} \
HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
data, len, (clss)->class_loader(), thread_type, wait); \
data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait); \
}
#else /* USDT2 */
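Editorial aside: the two probe changes above splice a cast in via SOLARIS_ONLY((void *)), which expands to the cast on Solaris and to nothing elsewhere. The sketch below reproduces the idiom with a hypothetical PLATFORM_ONLY macro and an overload pair standing in for the DTrace probe, purely to show how the token-level expansion steers the call.

#include <iostream>

#if defined(__sun)
  #define PLATFORM_ONLY(x) x
#else
  #define PLATFORM_ONLY(x)
#endif

static void probe(void* /*p*/) { std::cout << "void* overload\n"; }
static void probe(int*  /*p*/) { std::cout << "int* overload\n";  }

int main() {
  int v = 7;
  // On Solaris the macro inserts '(void *)', selecting the void* overload;
  // elsewhere it expands to nothing and the int* overload is chosen.
  probe(PLATFORM_ONLY((void *)) &v);
  return 0;
}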
@ -1419,6 +1419,8 @@ Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) c
}
// lookup a method in all the interfaces that this class implements
// Do NOT return private or static methods (new in JDK8), which are not externally visible;
// they should only be found in the initial InterfaceMethodRef
Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
Symbol* signature) const {
Array<Klass*>* all_ifs = transitive_interfaces();
@ -1427,7 +1429,7 @@ Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
for (int i = 0; i < num_ifs; i++) {
ik = InstanceKlass::cast(all_ifs->at(i));
Method* m = ik->lookup_method(name, signature);
if (m != NULL) {
if (m != NULL && m->is_public() && !m->is_static()) {
return m;
}
}
@ -2303,7 +2305,7 @@ void InstanceKlass::set_source_debug_extension(char* array, int length) {
}
address InstanceKlass::static_field_addr(int offset) {
return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + cast_from_oop<intptr_t>(java_mirror()));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ class InstanceMirrorKlass: public InstanceKlass {
// Static field offset is an offset into the Heap, should be converted by
// based on UseCompressedOop for traversal
static HeapWord* start_of_static_fields(oop obj) {
return (HeapWord*)((intptr_t)obj + offset_of_static_fields());
return (HeapWord*)(cast_from_oop<intptr_t>(obj) + offset_of_static_fields());
}
static void init_offset_of_static_fields() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) {
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
}
)
if (!oopDesc::is_null(heap_oop)) {
@ -62,7 +62,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) {
ref->InstanceKlass::oop_follow_contents(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
}
)
return;
@ -70,7 +70,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) {
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj);
}
)
MarkSweep::mark_and_push(referent_addr);
@ -130,7 +130,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref,
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
}
)
if (!oopDesc::is_null(heap_oop)) {
@ -142,7 +142,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref,
ref->InstanceKlass::oop_follow_contents(cm, obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
}
)
return;
@ -150,7 +150,7 @@ void specialized_oop_follow_contents(InstanceRefKlass* ref,
// treat referent as normal oop
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, obj);
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj);
}
)
PSParallelCompact::mark_and_push(cm, referent_addr);

View File

@ -292,9 +292,10 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
return allocate_new;
}
// private methods always have a new entry in the vtable
// private methods in classes always have a new entry in the vtable:
// by specification interpretation, private methods have never overridden
// anything (going back to the classic VM)
// JDK8 adds private methods in interfaces, which require invokespecial
if (target_method()->is_private()) {
return allocate_new;
}
@ -442,9 +443,10 @@ bool klassVtable::needs_new_vtable_entry(methodHandle target_method,
return true;
}
// private methods always have a new entry in the vtable
// private methods in classes always have a new entry in the vtable:
// by specification interpretation, private methods have never overridden
// anything (going back to the classic VM)
// JDK8 adds private methods in interfaces, which require invokespecial
if (target_method()->is_private()) {
return true;
}
@ -520,7 +522,7 @@ bool klassVtable::is_miranda_entry_at(int i) {
Klass* method_holder = m->method_holder();
InstanceKlass *mhk = InstanceKlass::cast(method_holder);
// miranda methods are interface methods in a class's vtable
// miranda methods are public abstract instance interface methods in a class's vtable
if (mhk->is_interface()) {
assert(m->is_public(), "should be public");
assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
@ -534,6 +536,8 @@ bool klassVtable::is_miranda_entry_at(int i) {
// "miranda" means not static, not defined by this class, and not defined
// in super unless it is private and therefore inaccessible to this class.
// the caller must make sure that the method belongs to an interface implemented by the class
// Miranda methods only include public interface instance methods:
// not private methods, not static methods, and not default (concrete) methods; only abstract methods qualify
bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
if (m->is_static()) {
return false;
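Editorial aside: the updated comments above pin down which interface methods can be mirandas. A hedged restatement of just that declaration-side filter, with simplified stand-in types rather than HotSpot's Method/Klass, might look like the sketch below; the real is_miranda additionally checks that the method is not defined in the class itself or accessibly in a superclass.

#include <cassert>

// Simplified stand-in for the attributes is_miranda() inspects.
struct MethodDesc {
  bool holder_is_interface;
  bool is_public;
  bool is_static;
  bool is_abstract;   // default (concrete) interface methods are not abstract
};

static bool is_miranda_candidate(const MethodDesc& m) {
  return m.holder_is_interface
      && m.is_public
      && !m.is_static
      && m.is_abstract;   // excludes JDK 8 default methods
}

int main() {
  MethodDesc abstract_if_method = { true, true, false, true  };
  MethodDesc default_if_method  = { true, true, false, false };
  assert(is_miranda_candidate(abstract_if_method));
  assert(!is_miranda_candidate(default_if_method));
  return 0;
}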

View File

@ -901,16 +901,6 @@ address Method::make_adapters(methodHandle mh, TRAPS) {
// This function must not hit a safepoint!
address Method::verified_code_entry() {
debug_only(No_Safepoint_Verifier nsv;)
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
if (code == NULL && UseCodeCacheFlushing) {
nmethod *saved_code = CodeCache::reanimate_saved_code(this);
if (saved_code != NULL) {
methodHandle method(this);
assert( ! saved_code->is_osr_method(), "should not get here for osr" );
set_code( method, saved_code );
}
}
assert(_from_compiled_entry != NULL, "must be set");
return _from_compiled_entry;
}

View File

@ -333,10 +333,10 @@ protected:
return (int)data()->cell_at(index);
}
void set_oop_at(int index, oop value) {
set_intptr_at(index, (intptr_t) value);
set_intptr_at(index, cast_from_oop<intptr_t>(value));
}
oop oop_at(int index) {
return (oop)intptr_at(index);
return cast_to_oop(intptr_at(index));
}
void set_flag_at(int flag_number) {

View File

@ -183,7 +183,7 @@ inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
// in inner GC loops so these are separated.
inline bool check_obj_alignment(oop obj) {
return (intptr_t)obj % MinObjAlignmentInBytes == 0;
return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
}
inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {

View File

@ -55,11 +55,16 @@ typedef class typeArrayOopDesc* typeArrayOop;
// to and from the underlying oopDesc pointer type.
//
// Because oop and its subclasses <type>Oop are class types, arbitrary
// conversions are not accepted by the compiler, and you may get a message
// about overloading ambiguity (between long and int is common when converting
// from a constant in 64 bit mode), or unable to convert from type to 'oop'.
// Applying a cast to one of these conversion operators first will get to the
// underlying oopDesc* type if appropriate.
// conversions are not accepted by the compiler. Applying a cast to
// an oop will cause the best matched conversion operator to be
// invoked returning the underlying oopDesc* type if appropriate.
// No copy constructors, explicit user conversions or operators of
// numerical type should be defined within the oop class. Most C++
// compilers will issue a compile time error concerning the overloading
// ambiguity between operators of numerical and pointer types. If
// a conversion between an oop and a numerical type is needed,
// use the inline template methods, cast_*_oop, defined below.
//
// Converting NULL to oop to Handle implicitly is no longer accepted by the
// compiler because there are too many steps in the conversion. Use Handle()
// instead, which generates less code anyway.
@ -83,12 +88,9 @@ public:
void raw_set_obj(const void* p) { _o = (oopDesc*)p; }
oop() { set_obj(NULL); }
oop(const oop& o) { set_obj(o.obj()); }
oop(const volatile oop& o) { set_obj(o.obj()); }
oop(const void* p) { set_obj(p); }
oop(intptr_t i) { set_obj((void *)i); }
#ifdef _LP64
oop(int i) { set_obj((void *)i); }
#endif
~oop() {
if (CheckUnhandledOops) unregister_oop();
}
@ -101,8 +103,6 @@ public:
bool operator==(void *p) const { return obj() == p; }
bool operator!=(const volatile oop o) const { return obj() != o.obj(); }
bool operator!=(void *p) const { return obj() != p; }
bool operator==(intptr_t p) const { return obj() == (oopDesc*)p; }
bool operator!=(intptr_t p) const { return obj() != (oopDesc*)p; }
bool operator<(oop o) const { return obj() < o.obj(); }
bool operator>(oop o) const { return obj() > o.obj(); }
@ -110,8 +110,18 @@ public:
bool operator>=(oop o) const { return obj() >= o.obj(); }
bool operator!() const { return !obj(); }
// Cast
// Assignment
oop& operator=(const oop& o) { _o = o.obj(); return *this; }
#ifndef SOLARIS
volatile oop& operator=(const oop& o) volatile { _o = o.obj(); return *this; }
#endif
volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
// Explicit user conversions
operator void* () const { return (void *)obj(); }
#ifndef SOLARIS
operator void* () const volatile { return (void *)obj(); }
#endif
operator HeapWord* () const { return (HeapWord*)obj(); }
operator oopDesc* () const { return obj(); }
operator intptr_t* () const { return (intptr_t*)obj(); }
@ -119,7 +129,6 @@ public:
operator markOop () const { return markOop(obj()); }
operator address () const { return (address)obj(); }
operator intptr_t () const volatile { return (intptr_t)obj(); }
// from javaCalls.cpp
operator jobject () const { return (jobject)obj(); }
@ -141,12 +150,26 @@ public:
class type##Oop : public oop { \
public: \
type##Oop() : oop() {} \
type##Oop(const oop& o) : oop(o) {} \
type##Oop(const volatile oop& o) : oop(o) {} \
type##Oop(const void* p) : oop(p) {} \
operator type##OopDesc* () const { return (type##OopDesc*)obj(); } \
type##OopDesc* operator->() const { \
return (type##OopDesc*)obj(); \
} \
type##Oop& operator=(const type##Oop& o) { \
oop::operator=(o); \
return *this; \
} \
NOT_SOLARIS( \
volatile type##Oop& operator=(const type##Oop& o) volatile { \
(void)const_cast<oop&>(oop::operator=(o)); \
return *this; \
}) \
volatile type##Oop& operator=(const volatile type##Oop& o) volatile {\
(void)const_cast<oop&>(oop::operator=(o)); \
return *this; \
} \
};
DEF_OOP(instance);
@ -156,6 +179,16 @@ DEF_OOP(typeArray);
#endif // CHECK_UNHANDLED_OOPS
// For CHECK_UNHANDLED_OOPS, it is ambiguous C++ behavior to have the oop
// structure contain explicit user defined conversions of both numerical
// and pointer type. Define inline methods to provide the numerical conversions.
template <class T> inline oop cast_to_oop(T value) {
return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void *))(value));
}
template <class T> inline T cast_from_oop(oop o) {
return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
}
// The metadata hierarchy is separate from the oop hierarchy
// class MetaspaceObj
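Editorial aside: the cast_to_oop/cast_from_oop templates above centralize every oop-to-numeric conversion so that the checked (class-type) and unchecked (plain-pointer) oop builds can share call sites, which is what the earlier (void *), cast_from_oop<intptr_t>() and cast_from_oop<uintptr_t>() hunks in this diff rely on. A minimal sketch of the same shape, using a hypothetical Oop typedef for the plain-pointer flavour only:

#include <cassert>
#include <cstdint>

typedef class OopDesc* Oop;   // stand-in for the plain-pointer oop typedef

template <class T> inline Oop cast_to_oop(T value) {
  return (Oop)(value);
}
template <class T> inline T cast_from_oop(Oop o) {
  return (T)(o);
}

int main() {
  Oop o = cast_to_oop<uintptr_t>(0x1000);          // integral value -> oop
  uintptr_t bits = cast_from_oop<uintptr_t>(o);    // oop -> integral value
  assert(bits == 0x1000);
  assert(cast_from_oop<intptr_t>(o) % 8 == 0);     // e.g. an alignment check
  return 0;
}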

Some files were not shown because too many files have changed in this diff.