Merge
commit 776ef6a071
.hgtags
@@ -526,3 +526,7 @@ dc1f9dec2018a37fedba47d8a2aedef99faaec64 jdk-12+19
f8fb0c86f2b3d24294d39c5685a628e1beb14ba7 jdk-12+21
732bec44c89e8b93a38296bf690f97b7230c5b6d jdk-12+22
eef755718cb24813031a842bbfc716a6cea18e9a jdk-12+23
7d4397b43fa305806160785a4c7210600d59581a jdk-12+24
7496df94b3b79f3da53925d2d137317715f11d97 jdk-12+25
de9fd809bb475401aad188eab2264226788aad81 jdk-12+26
f15d443f97318e9b40e6f451e327ff69ed4ec361 jdk-12+27
@@ -517,7 +517,7 @@ $(foreach m, $(ALL_MODULES), \
) \
)

ifneq ($(PANDOC), )
ifeq ($(ENABLE_PANDOC), true)
# For all markdown files in $module/share/specs directories, convert them to
# html, if we have pandoc (otherwise we'll just skip this).
@@ -1,5 +1,5 @@
#
# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -237,7 +237,8 @@ else ifeq ($(OPENJDK_TARGET_OS), solaris)
else ifeq ($(OPENJDK_TARGET_OS), windows)
NUM_CORES := $(NUMBER_OF_PROCESSORS)
MEMORY_SIZE := $(shell \
$(EXPR) `wmic computersystem get totalphysicalmemory -value | $(GREP) = \
$(EXPR) `wmic computersystem get totalphysicalmemory -value \
| $(GREP) = | $(SED) 's/\\r//g' \
| $(CUT) -d "=" -f 2-` / 1024 / 1024 \
)
endif
@@ -610,7 +610,14 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
BASIC_PATH_PROGS(DF, df)
BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
BASIC_PATH_PROGS(NICE, nice)

BASIC_PATH_PROGS(PANDOC, pandoc)
if test -n "$PANDOC"; then
  ENABLE_PANDOC="true"
else
  ENABLE_PANDOC="false"
fi
AC_SUBST(ENABLE_PANDOC)
])

###############################################################################
@@ -183,7 +183,8 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],

# Additional warnings that are not activated by -Wall and -Wextra
WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wsign-compare \
-Wunused-function -Wundef -Wunused-value -Wreturn-type"
-Wunused-function -Wundef -Wunused-value -Wreturn-type \
-Wtrampolines"
WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
@@ -761,6 +761,7 @@ MSVCR_DLL:=@MSVCR_DLL@
MSVCP_DLL:=@MSVCP_DLL@
UCRT_DLL_DIR:=@UCRT_DLL_DIR@
STLPORT_LIB:=@STLPORT_LIB@
ENABLE_PANDOC:=@ENABLE_PANDOC@

####################################################
#
@@ -203,7 +203,7 @@ ifeq ($(OPENJDK_TARGET_OS_TYPE), unix)

ifneq ($(MAN_FILES_MD), )
# If we got markdown files, ignore the troff files
ifeq ($(PANDOC), )
ifeq ($(ENABLE_PANDOC), false)
$(info Warning: pandoc not found. Not generating man pages)
else
# Create dynamic man pages from markdown using pandoc. We need
@@ -86,7 +86,7 @@ function MetaInlines(value) {
function change_title(type, value) {
if (type === 'MetaInlines') {
if (value[0].t === 'Str') {
var match = value[0].c.match(/^([A-Z]+)\([0-9]+\)$/);
var match = value[0].c.match(/^([A-Z0-9]+)\([0-9]+\)$/);
if (match) {
return MetaInlines([
Str("The"), Space(),
@@ -2133,7 +2133,12 @@ const uint Matcher::vector_ideal_reg(int len) {
}

const uint Matcher::vector_shift_count_ideal_reg(int size) {
return Op_VecX;
switch(size) {
  case 8: return Op_VecD;
  case 16: return Op_VecX;
}
ShouldNotReachHere();
return 0;
}

// AES support not yet implemented
@ -16524,32 +16529,32 @@ instruct vxor16B(vecX dst, vecX src1, vecX src2)
|
||||
%}
|
||||
|
||||
// ------------------------------ Shift ---------------------------------------
|
||||
|
||||
instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{
|
||||
instruct vshiftcnt8B(vecD dst, iRegIorL2I cnt) %{
|
||||
predicate(n->as_Vector()->length_in_bytes() == 8);
|
||||
match(Set dst (LShiftCntV cnt));
|
||||
format %{ "dup $dst, $cnt\t# shift count (vecX)" %}
|
||||
ins_encode %{
|
||||
__ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
|
||||
%}
|
||||
ins_pipe(vdup_reg_reg128);
|
||||
%}
|
||||
|
||||
// Right shifts on aarch64 SIMD are implemented as left shift by -ve amount
|
||||
instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{
|
||||
match(Set dst (RShiftCntV cnt));
|
||||
format %{ "dup $dst, $cnt\t# shift count (vecX)\n\tneg $dst, $dst\t T16B" %}
|
||||
format %{ "dup $dst, $cnt\t# shift count vector (8B)" %}
|
||||
ins_encode %{
|
||||
__ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($cnt$$reg));
|
||||
%}
|
||||
ins_pipe(vdup_reg_reg64);
|
||||
%}
|
||||
|
||||
instruct vshiftcnt16B(vecX dst, iRegIorL2I cnt) %{
|
||||
predicate(n->as_Vector()->length_in_bytes() == 16);
|
||||
match(Set dst (LShiftCntV cnt));
|
||||
match(Set dst (RShiftCntV cnt));
|
||||
format %{ "dup $dst, $cnt\t# shift count vector (16B)" %}
|
||||
ins_encode %{
|
||||
__ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg));
|
||||
__ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(vdup_reg_reg128);
|
||||
%}
|
||||
|
||||
instruct vsll8B(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll8B(vecD dst, vecD src, vecD shift) %{
|
||||
predicate(n->as_Vector()->length() == 4 ||
|
||||
n->as_Vector()->length() == 8);
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
@ -16563,7 +16568,6 @@ instruct vsll8B(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll16B(vecX dst, vecX src, vecX shift) %{
|
||||
predicate(n->as_Vector()->length() == 16);
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
@@ -16574,29 +16578,93 @@ instruct vsll16B(vecX dst, vecX src, vecX shift) %{
ins_pipe(vshift128);
%}

instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
// Right shifts with vector shift count on aarch64 SIMD are implemented
// as left shift by negative shift count.
// There are two cases for vector shift count.
//
// Case 1: The vector shift count is from replication.
//        |            |
//  LoadVector  RShiftCntV
//        |       /
//        RShiftVI
// Note: In inner loop, multiple neg instructions are used, which can be
// moved to outer loop and merged into one neg instruction.
//
// Case 2: The vector shift count is from loading.
// This case isn't supported by the middle end now, but it is supported by
// panama/vectorIntrinsics (JEP 338: Vector API).
//        |            |
//  LoadVector  LoadVector
//        |       /
//        RShiftVI
//
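The comment above captures the key trick: AArch64's SSHL shifts each lane left for a positive count and right for a negative one, so an arithmetic right shift with a vector shift count becomes a NEGR on the count followed by SSHL. A minimal single-lane sketch of that behaviour in plain C++ (an illustration only, not part of the patch; the lane model and names are assumptions):

#include <cstdint>
#include <cstdio>

// One-lane model of SSHL on a signed byte lane: a positive count shifts left,
// a negative count shifts right. For this sketch we assume the usual
// arithmetic right shift for signed types.
static int8_t sshl_lane(int8_t value, int8_t count) {
  return count >= 0 ? int8_t(value << count) : int8_t(value >> -count);
}

int main() {
  const int8_t v = -64;    // lane value
  const int8_t shift = 3;  // requested arithmetic right shift
  // The vsra* rules below do exactly this: negate the count, then SSHL.
  const int8_t result = sshl_lane(v, int8_t(-shift));
  printf("%d >> %d == %d\n", v, shift, result);  // prints: -64 >> 3 == -8
  return 0;
}

Negating the replicated count once (outside the loop body) rather than once per element is what the "moved to outer loop" note above refers to.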
|
||||
instruct vsra8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 4 ||
|
||||
n->as_Vector()->length() == 8);
|
||||
match(Set dst (URShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (8B)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
|
||||
instruct vsra16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 16);
|
||||
match(Set dst (RShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl8B(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 4 ||
|
||||
n->as_Vector()->length() == 8);
|
||||
match(Set dst (URShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (8B)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T8B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl16B(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 16);
|
||||
match(Set dst (URShiftVB src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (16B)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (16B)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T16B,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($shift$$reg));
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
@ -16708,11 +16776,10 @@ instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
|
||||
instruct vsll4S(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll4S(vecD dst, vecD src, vecD shift) %{
|
||||
predicate(n->as_Vector()->length() == 2 ||
|
||||
n->as_Vector()->length() == 4);
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
@ -16726,7 +16793,6 @@ instruct vsll4S(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll8S(vecX dst, vecX src, vecX shift) %{
|
||||
predicate(n->as_Vector()->length() == 8);
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
@ -16737,29 +16803,72 @@ instruct vsll8S(vecX dst, vecX src, vecX shift) %{
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsra4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2 ||
|
||||
n->as_Vector()->length() == 4);
|
||||
match(Set dst (URShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (4H)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T4H,
|
||||
as_FloatRegister($src$$reg),
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T4H,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
|
||||
instruct vsra8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 8);
|
||||
match(Set dst (RShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T8H,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl4S(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2 ||
|
||||
n->as_Vector()->length() == 4);
|
||||
match(Set dst (URShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (4H)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T4H,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl8S(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 8);
|
||||
match(Set dst (URShiftVS src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (8H)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (8H)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T8H,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($shift$$reg));
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
@ -16871,10 +16980,9 @@ instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
|
||||
ins_pipe(vshift128_imm);
|
||||
%}
|
||||
|
||||
instruct vsll2I(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll2I(vecD dst, vecD src, vecD shift) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (2S)" %}
|
||||
ins_encode %{
|
||||
@ -16888,7 +16996,6 @@ instruct vsll2I(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsll4I(vecX dst, vecX src, vecX shift) %{
|
||||
predicate(n->as_Vector()->length() == 4);
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (4S)" %}
|
||||
ins_encode %{
|
||||
@ -16899,28 +17006,70 @@ instruct vsll4I(vecX dst, vecX src, vecX shift) %{
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
|
||||
instruct vsra2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (URShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (2S)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (2S)" %}
|
||||
ins_encode %{
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
|
||||
instruct vsra4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 4);
|
||||
match(Set dst (RShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (4S)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T4S,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl2I(vecD dst, vecD src, vecD shift, vecD tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (URShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (2S)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T8B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T2S,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift64);
|
||||
%}
|
||||
|
||||
instruct vsrl4I(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 4);
|
||||
match(Set dst (URShiftVI src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (4S)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (4S)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T4S,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($shift$$reg));
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
@ -17006,7 +17155,6 @@ instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
|
||||
instruct vsll2L(vecX dst, vecX src, vecX shift) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (LShiftVL src shift));
|
||||
match(Set dst (RShiftVL src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "sshl $dst,$src,$shift\t# vector (2D)" %}
|
||||
ins_encode %{
|
||||
@ -17017,15 +17165,36 @@ instruct vsll2L(vecX dst, vecX src, vecX shift) %{
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
|
||||
instruct vsra2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (RShiftVL src shift));
|
||||
ins_cost(INSN_COST);
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"sshl $dst,$src,$tmp\t# vector (2D)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ sshl(as_FloatRegister($dst$$reg), __ T2D,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
||||
instruct vsrl2L(vecX dst, vecX src, vecX shift, vecX tmp) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (URShiftVL src shift));
|
||||
ins_cost(INSN_COST);
|
||||
format %{ "ushl $dst,$src,$shift\t# vector (2D)" %}
|
||||
effect(TEMP tmp);
|
||||
format %{ "negr $tmp,$shift\t"
|
||||
"ushl $dst,$src,$tmp\t# vector (2D)" %}
|
||||
ins_encode %{
|
||||
__ negr(as_FloatRegister($tmp$$reg), __ T16B,
|
||||
as_FloatRegister($shift$$reg));
|
||||
__ ushl(as_FloatRegister($dst$$reg), __ T2D,
|
||||
as_FloatRegister($src$$reg),
|
||||
as_FloatRegister($shift$$reg));
|
||||
as_FloatRegister($tmp$$reg));
|
||||
%}
|
||||
ins_pipe(vshift128);
|
||||
%}
|
||||
|
@ -4896,7 +4896,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
|
||||
|
||||
// A very short string
|
||||
cmpw(cnt2, minCharsInWord);
|
||||
br(Assembler::LT, SHORT_STRING);
|
||||
br(Assembler::LE, SHORT_STRING);
|
||||
|
||||
// Compare longwords
|
||||
// load first parts of strings and finish initialization while loading
|
||||
@ -4920,8 +4920,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
|
||||
ldr(tmp2, Address(str2));
|
||||
cmp(cnt2, STUB_THRESHOLD);
|
||||
br(GE, STUB);
|
||||
subsw(cnt2, cnt2, 4);
|
||||
br(EQ, TAIL_CHECK);
|
||||
subw(cnt2, cnt2, 4);
|
||||
eor(vtmpZ, T16B, vtmpZ, vtmpZ);
|
||||
lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
|
||||
lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
|
||||
@ -4937,8 +4936,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
|
||||
ldrs(vtmp, Address(str2));
|
||||
cmp(cnt2, STUB_THRESHOLD);
|
||||
br(GE, STUB);
|
||||
subsw(cnt2, cnt2, 4);
|
||||
br(EQ, TAIL_CHECK);
|
||||
subw(cnt2, cnt2, 4);
|
||||
lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
|
||||
eor(vtmpZ, T16B, vtmpZ, vtmpZ);
|
||||
lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
|
||||
@ -5650,12 +5648,12 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
|
||||
orr(v5, T16B, Vtmp3, Vtmp4);
|
||||
uzp1(Vtmp1, T16B, Vtmp1, Vtmp2);
|
||||
uzp1(Vtmp3, T16B, Vtmp3, Vtmp4);
|
||||
stpq(Vtmp1, Vtmp3, dst);
|
||||
uzp2(v5, T16B, v4, v5); // high bytes
|
||||
umov(tmp2, v5, D, 1);
|
||||
fmovd(tmp1, v5);
|
||||
orr(tmp1, tmp1, tmp2);
|
||||
cbnz(tmp1, LOOP_8);
|
||||
stpq(Vtmp1, Vtmp3, dst);
|
||||
sub(len, len, 32);
|
||||
add(dst, dst, 32);
|
||||
add(src, src, 64);
|
||||
@ -5673,7 +5671,6 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
|
||||
prfm(Address(src, SoftwarePrefetchHintDistance));
|
||||
uzp1(v4, T16B, Vtmp1, Vtmp2);
|
||||
uzp1(v5, T16B, Vtmp3, Vtmp4);
|
||||
stpq(v4, v5, dst);
|
||||
orr(Vtmp1, T16B, Vtmp1, Vtmp2);
|
||||
orr(Vtmp3, T16B, Vtmp3, Vtmp4);
|
||||
uzp2(Vtmp1, T16B, Vtmp1, Vtmp3); // high bytes
|
||||
@ -5681,6 +5678,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
|
||||
fmovd(tmp1, Vtmp1);
|
||||
orr(tmp1, tmp1, tmp2);
|
||||
cbnz(tmp1, LOOP_8);
|
||||
stpq(v4, v5, dst);
|
||||
sub(len, len, 32);
|
||||
add(dst, dst, 32);
|
||||
add(src, src, 64);
|
||||
@ -5695,9 +5693,9 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
|
||||
ld1(Vtmp1, T8H, src);
|
||||
uzp1(Vtmp2, T16B, Vtmp1, Vtmp1); // low bytes
|
||||
uzp2(Vtmp3, T16B, Vtmp1, Vtmp1); // high bytes
|
||||
strd(Vtmp2, dst);
|
||||
fmovd(tmp1, Vtmp3);
|
||||
cbnz(tmp1, NEXT_1);
|
||||
strd(Vtmp2, dst);
|
||||
|
||||
sub(len, len, 8);
|
||||
add(dst, dst, 8);
|
||||
@ -5710,9 +5708,9 @@ void MacroAssembler::encode_iso_array(Register src, Register dst,
|
||||
cbz(len, DONE);
|
||||
BIND(NEXT_1);
|
||||
ldrh(tmp1, Address(post(src, 2)));
|
||||
strb(tmp1, Address(post(dst, 1)));
|
||||
tst(tmp1, 0xff00);
|
||||
br(NE, SET_RESULT);
|
||||
strb(tmp1, Address(post(dst, 1)));
|
||||
subs(len, len, 1);
|
||||
br(GT, NEXT_1);
|
||||
|
||||
|
@ -105,8 +105,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
|
||||
// compiled code in threads for which the event is enabled. Check here for
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
|
||||
__ ldrb(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
|
||||
__ cbnz(rscratch1, run_compiled_code);
|
||||
__ ldrw(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));
|
||||
__ cbzw(rscratch1, run_compiled_code);
|
||||
__ ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));
|
||||
__ br(rscratch1);
|
||||
__ BIND(run_compiled_code);
|
||||
|
@ -1886,6 +1886,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ restore_locals();
|
||||
__ restore_constant_pool_cache();
|
||||
__ get_method(rmethod);
|
||||
__ get_dispatch();
|
||||
|
||||
// The method data pointer was incremented already during
|
||||
// call profiling. We have to restore the mdp for the current bcp.
|
||||
|
@ -8945,9 +8945,10 @@ instruct partialSubtypeCheck( R0RegP index, R1RegP sub, R2RegP super, flagsRegP
|
||||
instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch )
|
||||
%{
|
||||
match(Set pcc (FastLock object box));
|
||||
predicate(!(UseBiasedLocking && !UseOptoBiasInlining));
|
||||
|
||||
effect(TEMP scratch, TEMP scratch2);
|
||||
ins_cost(100);
|
||||
ins_cost(DEFAULT_COST*3);
|
||||
|
||||
format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2" %}
|
||||
ins_encode %{
|
||||
@ -8956,6 +8957,21 @@ instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRe
|
||||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
instruct cmpFastLock_noBiasInline(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2,
|
||||
iRegP scratch, iRegP scratch3) %{
|
||||
match(Set pcc (FastLock object box));
|
||||
predicate(UseBiasedLocking && !UseOptoBiasInlining);
|
||||
|
||||
effect(TEMP scratch, TEMP scratch2, TEMP scratch3);
|
||||
ins_cost(DEFAULT_COST*5);
|
||||
|
||||
format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $scratch3" %}
|
||||
ins_encode %{
|
||||
__ fast_lock($object$$Register, $box$$Register, $scratch$$Register, $scratch2$$Register, $scratch3$$Register);
|
||||
%}
|
||||
ins_pipe(long_memory_op);
|
||||
%}
|
||||
|
||||
|
||||
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, iRegP scratch ) %{
|
||||
match(Set pcc (FastUnlock object box));
|
||||
|
@ -1971,7 +1971,7 @@ void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
|
||||
|
||||
|
||||
#ifdef COMPILER2
|
||||
void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
|
||||
void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2, Register scratch3)
|
||||
{
|
||||
assert(VM_Version::supports_ldrex(), "unsupported, yet?");
|
||||
|
||||
@ -1985,11 +1985,13 @@ void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch,
|
||||
Label fast_lock, done;
|
||||
|
||||
if (UseBiasedLocking && !UseOptoBiasInlining) {
|
||||
Label failed;
|
||||
biased_locking_enter(Roop, Rmark, Rscratch, false, noreg, done, failed);
|
||||
bind(failed);
|
||||
assert(scratch3 != noreg, "need extra temporary for -XX:-UseOptoBiasInlining");
|
||||
biased_locking_enter(Roop, Rmark, Rscratch, false, scratch3, done, done);
|
||||
// Fall through if lock not biased otherwise branch to done
|
||||
}
|
||||
|
||||
// Invariant: Rmark loaded below does not contain biased lock pattern
|
||||
|
||||
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
|
||||
tst(Rmark, markOopDesc::unlocked_value);
|
||||
b(fast_lock, ne);
|
||||
@ -2016,6 +2018,9 @@ void MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratch,
|
||||
|
||||
bind(done);
|
||||
|
||||
// At this point flags are set as follows:
|
||||
// EQ -> Success
|
||||
// NE -> Failure, branch to slow path
|
||||
}
|
||||
|
||||
void MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscratch, Register Rscratch2)
|
||||
|
@@ -371,10 +371,10 @@ public:
// lock_reg and obj_reg must be loaded up with the appropriate values.
// swap_reg must be supplied.
// tmp_reg must be supplied.
// Optional slow case is for implementations (interpreter and C1) which branch to
// slow case directly. If slow_case is NULL, then leaves condition
// codes set (for C2's Fast_Lock node) and jumps to done label.
// Falls through for the fast locking attempt.
// Done label is branched to with condition code EQ set if the lock is
// biased and we acquired it. Slow case label is branched to with
// condition code NE set if the lock is biased but we failed to acquire
// it. Otherwise fall through.
// Returns offset of first potentially-faulting instruction for null
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
@ -1073,7 +1073,7 @@ public:
|
||||
void restore_default_fp_mode();
|
||||
|
||||
#ifdef COMPILER2
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
|
||||
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
|
||||
#endif
|
||||
|
||||
|
@ -269,7 +269,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// when called via a c2i.
|
||||
|
||||
// Pass initial_caller_sp to framemanager.
|
||||
__ mr(R21_tmp1, R1_SP);
|
||||
__ mr(R21_sender_SP, R1_SP);
|
||||
|
||||
// Do a light-weight C-call here, r_new_arg_entry holds the address
|
||||
// of the interpreter entry point (frame manager or native entry)
|
||||
|
@ -532,14 +532,8 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
// these parameters the pre-barrier does not generate
|
||||
// the load of the previous value.
|
||||
|
||||
// Restore caller sp for c2i case.
|
||||
#ifdef ASSERT
|
||||
__ ld(R9_ARG7, 0, R1_SP);
|
||||
__ ld(R10_ARG8, 0, R21_sender_SP);
|
||||
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
|
||||
__ asm_assert_eq("backlink", 0x544);
|
||||
#endif // ASSERT
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
// Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
|
||||
__ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
|
||||
|
||||
__ blr();
|
||||
|
||||
@ -835,8 +829,13 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
|
||||
assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
|
||||
__ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
|
||||
__ mtctr(Rscratch1);
|
||||
// Restore caller_sp.
|
||||
// Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
|
||||
#ifdef ASSERT
|
||||
Label frame_not_shrunk;
|
||||
__ cmpld(CCR0, R1_SP, R21_sender_SP);
|
||||
__ ble(CCR0, frame_not_shrunk);
|
||||
__ stop("frame shrunk", 0x546);
|
||||
__ bind(frame_not_shrunk);
|
||||
__ ld(Rscratch1, 0, R1_SP);
|
||||
__ ld(R0, 0, R21_sender_SP);
|
||||
__ cmpd(CCR0, R0, Rscratch1);
|
||||
@ -1155,15 +1154,6 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
}
|
||||
}
|
||||
|
||||
// Pop c2i arguments (if any) off when we return.
|
||||
#ifdef ASSERT
|
||||
__ ld(R9_ARG7, 0, R1_SP);
|
||||
__ ld(R10_ARG8, 0, R21_sender_SP);
|
||||
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
|
||||
__ asm_assert_eq("backlink", 0x545);
|
||||
#endif // ASSERT
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
|
||||
if (use_instruction) {
|
||||
switch (kind) {
|
||||
case Interpreter::java_lang_math_sqrt: __ fsqrt(F1_RET, F1); break;
|
||||
@ -1188,6 +1178,8 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
__ restore_LR_CR(R0);
|
||||
}
|
||||
|
||||
// Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
|
||||
__ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
|
||||
__ blr();
|
||||
|
||||
__ flush();
|
||||
@ -1843,8 +1835,8 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
|
||||
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
|
||||
__ kernel_crc32_singleByte(crc, data, dataLen, table, tmp, true);
|
||||
|
||||
// Restore caller sp for c2i case and return.
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
// Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
|
||||
__ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
|
||||
__ blr();
|
||||
|
||||
// Generate a vanilla native entry as the slow path.
|
||||
@ -1931,8 +1923,8 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
|
||||
// code compactness.
|
||||
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, true);
|
||||
|
||||
// Restore caller sp for c2i case and return.
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
// Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
|
||||
__ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
|
||||
__ blr();
|
||||
|
||||
// Generate a vanilla native entry as the slow path.
|
||||
@ -2019,8 +2011,8 @@ address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(Abstract
|
||||
// code compactness.
|
||||
__ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3, false);
|
||||
|
||||
// Restore caller sp for c2i case and return.
|
||||
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
|
||||
// Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
|
||||
__ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
|
||||
__ blr();
|
||||
|
||||
BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
|
||||
|
@ -649,7 +649,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
|
||||
case T_FLOAT: {
|
||||
if (dest->is_single_xmm()) {
|
||||
if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_float()) {
|
||||
if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_float()) {
|
||||
__ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
|
||||
} else {
|
||||
__ movflt(dest->as_xmm_float_reg(),
|
||||
@ -671,7 +671,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
|
||||
case T_DOUBLE: {
|
||||
if (dest->is_double_xmm()) {
|
||||
if (LP64_ONLY(UseAVX < 2 &&) c->is_zero_double()) {
|
||||
if (LP64_ONLY(UseAVX <= 2 &&) c->is_zero_double()) {
|
||||
__ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
|
||||
} else {
|
||||
__ movdbl(dest->as_xmm_double_reg(),
|
||||
|
@ -2924,11 +2924,11 @@ instruct MoveVecX2Leg(legVecX dst, vecX src) %{
|
||||
match(Set dst src);
|
||||
format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
|
||||
ins_encode %{
|
||||
if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
} else {
|
||||
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
|
||||
int vector_len = 2;
|
||||
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
|
||||
} else {
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
}
|
||||
%}
|
||||
ins_pipe( fpu_reg_reg );
|
||||
@ -2939,11 +2939,11 @@ instruct MoveLeg2VecX(vecX dst, legVecX src) %{
|
||||
match(Set dst src);
|
||||
format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
|
||||
ins_encode %{
|
||||
if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
} else {
|
||||
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
|
||||
int vector_len = 2;
|
||||
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
|
||||
} else {
|
||||
__ movdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
}
|
||||
%}
|
||||
ins_pipe( fpu_reg_reg );
|
||||
@ -2966,11 +2966,11 @@ instruct MoveVecY2Leg(legVecY dst, vecY src) %{
|
||||
match(Set dst src);
|
||||
format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
|
||||
ins_encode %{
|
||||
if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
|
||||
__ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
} else {
|
||||
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
|
||||
int vector_len = 2;
|
||||
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
|
||||
} else {
|
||||
__ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
}
|
||||
%}
|
||||
ins_pipe( fpu_reg_reg );
|
||||
@ -2981,11 +2981,11 @@ instruct MoveLeg2VecY(vecY dst, legVecY src) %{
|
||||
match(Set dst src);
|
||||
format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
|
||||
ins_encode %{
|
||||
if (UseAVX < 2 || VM_Version::supports_avx512vl()) {
|
||||
__ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
} else {
|
||||
if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
|
||||
int vector_len = 2;
|
||||
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
|
||||
} else {
|
||||
__ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
|
||||
}
|
||||
%}
|
||||
ins_pipe( fpu_reg_reg );
|
||||
|
@ -7760,9 +7760,9 @@ instruct mulAddS2I_rReg(rRegI dst, rRegI src1, rRegI src2, rRegI src3, eFlagsReg
|
||||
match(Set dst (MulAddS2I (Binary dst src1) (Binary src2 src3)));
|
||||
effect(KILL cr, KILL src2);
|
||||
|
||||
expand %{ mulI_rReg(dst, src1, cr);
|
||||
mulI_rReg(src2, src3, cr);
|
||||
addI_rReg(dst, src2, cr); %}
|
||||
expand %{ mulI_eReg(dst, src1, cr);
|
||||
mulI_eReg(src2, src3, cr);
|
||||
addI_eReg(dst, src2, cr); %}
|
||||
%}
|
||||
|
||||
// Multiply Register Int to Long
|
||||
|
@ -4265,132 +4265,196 @@ operand cmpOpUCF2() %{
|
||||
|
||||
// Operands for bound floating pointer register arguments
|
||||
operand rxmm0() %{
|
||||
constraint(ALLOC_IN_RC(xmm0_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX<= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm0_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm1() %{
|
||||
constraint(ALLOC_IN_RC(xmm1_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm1_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm2() %{
|
||||
constraint(ALLOC_IN_RC(xmm2_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm2_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm3() %{
|
||||
constraint(ALLOC_IN_RC(xmm3_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm3_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm4() %{
|
||||
constraint(ALLOC_IN_RC(xmm4_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm4_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm5() %{
|
||||
constraint(ALLOC_IN_RC(xmm5_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm5_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm6() %{
|
||||
constraint(ALLOC_IN_RC(xmm6_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm6_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm7() %{
|
||||
constraint(ALLOC_IN_RC(xmm7_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm7_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm8() %{
|
||||
constraint(ALLOC_IN_RC(xmm8_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm8_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm9() %{
|
||||
constraint(ALLOC_IN_RC(xmm9_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm9_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm10() %{
|
||||
constraint(ALLOC_IN_RC(xmm10_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm10_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm11() %{
|
||||
constraint(ALLOC_IN_RC(xmm11_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm11_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm12() %{
|
||||
constraint(ALLOC_IN_RC(xmm12_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm12_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm13() %{
|
||||
constraint(ALLOC_IN_RC(xmm13_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm13_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm14() %{
|
||||
constraint(ALLOC_IN_RC(xmm14_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm14_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm15() %{
|
||||
constraint(ALLOC_IN_RC(xmm15_reg)); match(VecX);
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2)); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm15_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm16() %{
|
||||
constraint(ALLOC_IN_RC(xmm16_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm16_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm17() %{
|
||||
constraint(ALLOC_IN_RC(xmm17_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm17_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm18() %{
|
||||
constraint(ALLOC_IN_RC(xmm18_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm18_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm19() %{
|
||||
constraint(ALLOC_IN_RC(xmm19_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm19_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm20() %{
|
||||
constraint(ALLOC_IN_RC(xmm20_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm20_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm21() %{
|
||||
constraint(ALLOC_IN_RC(xmm21_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm21_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm22() %{
|
||||
constraint(ALLOC_IN_RC(xmm22_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm22_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm23() %{
|
||||
constraint(ALLOC_IN_RC(xmm23_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm23_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm24() %{
|
||||
constraint(ALLOC_IN_RC(xmm24_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm24_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm25() %{
|
||||
constraint(ALLOC_IN_RC(xmm25_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm25_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm26() %{
|
||||
constraint(ALLOC_IN_RC(xmm26_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm26_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm27() %{
|
||||
constraint(ALLOC_IN_RC(xmm27_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm27_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm28() %{
|
||||
constraint(ALLOC_IN_RC(xmm28_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm28_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm29() %{
|
||||
constraint(ALLOC_IN_RC(xmm29_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm29_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm30() %{
|
||||
constraint(ALLOC_IN_RC(xmm30_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm30_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
operand rxmm31() %{
|
||||
constraint(ALLOC_IN_RC(xmm31_reg)); match(VecX);
|
||||
predicate(UseAVX == 3); format%{%} interface(REG_INTER);
|
||||
constraint(ALLOC_IN_RC(xmm31_reg));
|
||||
match(VecX);
|
||||
format%{%}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
//----------OPERAND CLASSES----------------------------------------------------
|
||||
@ -12651,33 +12715,6 @@ instruct RethrowException()
|
||||
// Execute ZGC load barrier (strong) slow path
|
||||
//
|
||||
|
||||
// When running without XMM regs
|
||||
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
predicate(MaxVectorSize < 16);
|
||||
|
||||
effect(DEF dst, KILL cr);
|
||||
|
||||
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
|
||||
ins_encode %{
|
||||
#if INCLUDE_ZGC
|
||||
Register d = $dst$$Register;
|
||||
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
|
||||
assert(d != r12, "Can't be R12!");
|
||||
assert(d != r15, "Can't be R15!");
|
||||
assert(d != rsp, "Can't be RSP!");
|
||||
|
||||
__ lea(d, $mem$$Address);
|
||||
__ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
|
||||
#else
|
||||
ShouldNotReachHere();
|
||||
#endif
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
@ -12686,7 +12723,7 @@ instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
|
||||
predicate(UseAVX <= 2);
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -12694,7 +12731,7 @@ instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
|
||||
format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
|
||||
format %{"LoadBarrierSlowRegXmmAndYmm $dst, $mem" %}
|
||||
ins_encode %{
|
||||
#if INCLUDE_ZGC
|
||||
Register d = $dst$$Register;
|
||||
@ -12725,7 +12762,7 @@ instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
|
||||
predicate(UseAVX == 3);
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -12760,33 +12797,6 @@ instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
// Execute ZGC load barrier (weak) slow path
|
||||
//
|
||||
|
||||
// When running without XMM regs
|
||||
instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg mem));
|
||||
predicate(MaxVectorSize < 16);
|
||||
|
||||
effect(DEF dst, KILL cr);
|
||||
|
||||
format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
|
||||
ins_encode %{
|
||||
#if INCLUDE_ZGC
|
||||
Register d = $dst$$Register;
|
||||
ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
|
||||
assert(d != r12, "Can't be R12!");
|
||||
assert(d != r15, "Can't be R15!");
|
||||
assert(d != rsp, "Can't be RSP!");
|
||||
|
||||
__ lea(d, $mem$$Address);
|
||||
__ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
|
||||
#else
|
||||
ShouldNotReachHere();
|
||||
#endif
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// For XMM and YMM enabled processors
|
||||
instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
|
||||
@ -12795,7 +12805,7 @@ instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
|
||||
match(Set dst (LoadBarrierWeakSlowReg mem));
|
||||
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
|
||||
predicate(UseAVX <= 2);
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -12803,7 +12813,7 @@ instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
KILL x8, KILL x9, KILL x10, KILL x11,
|
||||
KILL x12, KILL x13, KILL x14, KILL x15);
|
||||
|
||||
format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
|
||||
format %{"LoadBarrierWeakSlowRegXmmAndYmm $dst, $mem" %}
|
||||
ins_encode %{
|
||||
#if INCLUDE_ZGC
|
||||
Register d = $dst$$Register;
|
||||
@ -12834,7 +12844,7 @@ instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierWeakSlowReg mem));
|
||||
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
|
||||
predicate(UseAVX == 3);
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
|
@@ -5073,7 +5073,7 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init();

if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
if (!FLAG_IS_DEFAULT(AllocateHeapAt) || !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
  set_coredump_filter(DAX_SHARED_BIT);
}
@ -370,7 +370,6 @@ JVM_handle_linux_signal(int sig,
|
||||
if (thread->on_local_stack(addr)) {
|
||||
// stack overflow
|
||||
if (thread->in_stack_yellow_reserved_zone(addr)) {
|
||||
thread->disable_stack_yellow_reserved_zone();
|
||||
if (thread->thread_state() == _thread_in_Java) {
|
||||
if (thread->in_stack_reserved_zone(addr)) {
|
||||
frame fr;
|
||||
@ -392,9 +391,11 @@ JVM_handle_linux_signal(int sig,
|
||||
}
|
||||
// Throw a stack overflow exception. Guard pages will be reenabled
|
||||
// while unwinding the stack.
|
||||
thread->disable_stack_yellow_reserved_zone();
|
||||
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
|
||||
} else {
|
||||
// Thread was in the vm or native code. Return and try to finish.
|
||||
thread->disable_stack_yellow_reserved_zone();
|
||||
return 1;
|
||||
}
|
||||
} else if (thread->in_stack_red_zone(addr)) {
|
||||
|
new file: src/hotspot/os_cpu/linux_x86/gc/z/zArguments_linux_x86.cpp (42 lines)
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/debug.hpp"

void ZArguments::initialize_platform() {
#ifdef COMPILER2
  // The C2 barrier slow path expects vector registers to be at least
  // 16 bytes wide, which is the minimum width available on all
  // x86-64 systems. However, the user could have specified a lower
  // number on the command-line, in which case we print a warning
  // and raise it to 16.
  if (MaxVectorSize < 16) {
    warning("ZGC requires MaxVectorSize to be at least 16");
    FLAG_SET_DEFAULT(MaxVectorSize, 16);
  }
#endif
}
@ -46,6 +46,7 @@ class ciEnv : StackObj {
|
||||
|
||||
friend class CompileBroker;
|
||||
friend class Dependencies; // for get_object, during logging
|
||||
friend class PrepareExtraDataClosure;
|
||||
|
||||
private:
|
||||
Arena* _arena; // Alias for _ciEnv_arena except in init_shared_objects()
|
||||
@ -188,6 +189,10 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
ciMetadata* cached_metadata(Metadata* o) {
|
||||
return _factory->cached_metadata(o);
|
||||
}
|
||||
|
||||
ciInstance* get_instance(oop o) {
|
||||
if (o == NULL) return NULL;
|
||||
return get_object(o)->as_instance();
|
||||
|
@ -78,10 +78,81 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {
|
||||
_parameters = NULL;
|
||||
}
|
||||
|
||||
void ciMethodData::load_extra_data() {
|
||||
// Check for entries that reference an unloaded method
|
||||
class PrepareExtraDataClosure : public CleanExtraDataClosure {
|
||||
MethodData* _mdo;
|
||||
uint64_t _safepoint_counter;
|
||||
GrowableArray<Method*> _uncached_methods;
|
||||
|
||||
public:
|
||||
PrepareExtraDataClosure(MethodData* mdo)
|
||||
: _mdo(mdo),
|
||||
_safepoint_counter(SafepointSynchronize::safepoint_counter()),
|
||||
_uncached_methods()
|
||||
{ }
|
||||
|
||||
bool is_live(Method* m) {
|
||||
if (!m->method_holder()->is_loader_alive()) {
|
||||
return false;
|
||||
}
|
||||
if (CURRENT_ENV->cached_metadata(m) == NULL) {
|
||||
// Uncached entries need to be pre-populated.
|
||||
_uncached_methods.append(m);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool has_safepointed() {
|
||||
return SafepointSynchronize::safepoint_counter() != _safepoint_counter;
|
||||
}
|
||||
|
||||
bool finish() {
|
||||
if (_uncached_methods.length() == 0) {
|
||||
// Preparation finished iff all Methods* were already cached.
|
||||
return true;
|
||||
}
|
||||
// Holding locks through safepoints is bad practice.
|
||||
MutexUnlocker mu(_mdo->extra_data_lock());
|
||||
for (int i = 0; i < _uncached_methods.length(); ++i) {
|
||||
if (has_safepointed()) {
|
||||
// The metadata in the growable array might contain stale
|
||||
// entries after a safepoint.
|
||||
return false;
|
||||
}
|
||||
Method* method = _uncached_methods.at(i);
|
||||
// Populating ciEnv caches may cause safepoints due
|
||||
// to taking the Compile_lock with safepoint checks.
|
||||
(void)CURRENT_ENV->get_method(method);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void ciMethodData::prepare_metadata() {
  MethodData* mdo = get_MethodData();

  for (;;) {
    ResourceMark rm;
    PrepareExtraDataClosure cl(mdo);
    mdo->clean_extra_data(&cl);
    if (cl.finish()) {
      // When encountering uncached metadata, the Compile_lock might be
      // acquired when creating ciMetadata handles, causing safepoints
      // which requires a new round of preparation to clean out potentially
      // new unloading metadata.
      return;
    }
  }
}
|
||||
|
||||
void ciMethodData::load_extra_data() {
|
||||
MethodData* mdo = get_MethodData();
|
||||
MutexLocker ml(mdo->extra_data_lock());
|
||||
// Deferred metadata cleaning due to concurrent class unloading.
|
||||
prepare_metadata();
|
||||
// After metadata preparation, there is no stale metadata,
|
||||
// and no safepoints can introduce more stale metadata.
|
||||
NoSafepointVerifier no_safepoint;
|
||||
|
||||
// speculative trap entries also hold a pointer to a Method so need to be translated
|
||||
DataLayout* dp_src = mdo->extra_data_base();
|
||||
@ -94,22 +165,21 @@ void ciMethodData::load_extra_data() {
|
||||
// New traps in the MDO may have been added since we copied the
|
||||
// data (concurrent deoptimizations before we acquired
|
||||
// extra_data_lock above) or can be removed (a safepoint may occur
|
||||
// in the translate_from call below) as we translate the copy:
|
||||
// in the prepare_metadata call above) as we translate the copy:
|
||||
// update the copy as we go.
|
||||
int tag = dp_src->tag();
|
||||
if (tag != DataLayout::arg_info_data_tag) {
|
||||
memcpy(dp_dst, dp_src, ((intptr_t)MethodData::next_extra(dp_src)) - ((intptr_t)dp_src));
|
||||
size_t entry_size = DataLayout::header_size_in_bytes();
|
||||
if (tag != DataLayout::no_tag) {
|
||||
ProfileData* src_data = dp_src->data_in();
|
||||
entry_size = src_data->size_in_bytes();
|
||||
}
|
||||
memcpy(dp_dst, dp_src, entry_size);
|
||||
|
||||
switch(tag) {
|
||||
case DataLayout::speculative_trap_data_tag: {
|
||||
ciSpeculativeTrapData data_dst(dp_dst);
|
||||
SpeculativeTrapData data_src(dp_src);
|
||||
|
||||
{ // During translation a safepoint can happen or VM lock can be taken (e.g., Compile_lock).
|
||||
MutexUnlocker ml(mdo->extra_data_lock());
|
||||
data_dst.translate_from(&data_src);
|
||||
}
|
||||
data_dst.translate_from(&data_src);
|
||||
break;
|
||||
}
|
||||
case DataLayout::bit_data_tag:
|
||||
|
@ -475,6 +475,7 @@ private:
    return (address) _data;
  }

  void prepare_metadata();
  void load_extra_data();
  ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
@ -265,6 +265,24 @@ int ciObjectFactory::metadata_compare(Metadata* const& key, ciMetadata* const& e
  else return 0;
}

// ------------------------------------------------------------------
// ciObjectFactory::cached_metadata
//
// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
// already been created, it is returned. Otherwise, null is returned.
ciMetadata* ciObjectFactory::cached_metadata(Metadata* key) {
  ASSERT_IN_VM;

  bool found = false;
  int index = _ci_metadata->find_sorted<Metadata*, ciObjectFactory::metadata_compare>(key, found);

  if (!found) {
    return NULL;
  }
  return _ci_metadata->at(index)->as_metadata();
}


// ------------------------------------------------------------------
// ciObjectFactory::get_metadata
//
@ -100,6 +100,7 @@ public:
  // Get the ciObject corresponding to some oop.
  ciObject* get(oop key);
  ciMetadata* get_metadata(Metadata* key);
  ciMetadata* cached_metadata(Metadata* key);
  ciSymbol* get_symbol(Symbol* key);

  // Get the ciSymbol corresponding to one of the vmSymbols.
@ -170,7 +170,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
    for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
                                current != NULL;
                                current = current->_next) {
      guarantee(oopDesc::is_oop(current->_pd_cache->object_no_keepalive()), "Invalid oop");
      guarantee(oopDesc::is_oop_or_null(current->_pd_cache->object_no_keepalive()), "Invalid oop");
    }
  }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -1795,14 +1795,17 @@ void SystemDictionary::add_to_hierarchy(InstanceKlass* k, TRAPS) {
  assert(k != NULL, "just checking");
  assert_locked_or_safepoint(Compile_lock);

  // Link into hierarchy. Make sure the vtables are initialized before linking into
  k->set_init_state(InstanceKlass::loaded);
  // make sure init_state store is already done.
  // The compiler reads the hierarchy outside of the Compile_lock.
  // Access ordering is used to add to hierarchy.

  // Link into hierarchy.
  k->append_to_sibling_list();    // add to superklass/sibling list
  k->process_interfaces(THREAD);  // handle all "implements" declarations
  k->set_init_state(InstanceKlass::loaded);

  // Now flush all code that depended on old class hierarchy.
  // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
  // Also, first reinitialize vtable because it may have gotten out of synch
  // while the new class wasn't connected to the class hierarchy.
  CodeCache::flush_dependents_on(k);
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -503,26 +503,33 @@ Handle SystemDictionaryShared::get_shared_protection_domain(Handle class_loader,
Handle SystemDictionaryShared::get_shared_protection_domain(Handle class_loader,
                                                             ModuleEntry* mod, TRAPS) {
  ClassLoaderData *loader_data = mod->loader_data();
  Handle protection_domain;
  if (mod->shared_protection_domain() == NULL) {
    Symbol* location = mod->location();
    if (location != NULL) {
      Handle url_string = java_lang_String::create_from_symbol(
                              location, CHECK_(protection_domain));
      Handle location_string = java_lang_String::create_from_symbol(
                              location, CHECK_NH);
      Handle url;
      JavaValue result(T_OBJECT);
      Klass* classLoaders_klass =
          SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
      JavaCalls::call_static(&result, classLoaders_klass, vmSymbols::toFileURL_name(),
      if (location->starts_with("jrt:/")) {
        url = JavaCalls::construct_new_instance(SystemDictionary::URL_klass(),
                                                vmSymbols::string_void_signature(),
                                                location_string, CHECK_NH);
      } else {
        Klass* classLoaders_klass =
            SystemDictionary::jdk_internal_loader_ClassLoaders_klass();
        JavaCalls::call_static(&result, classLoaders_klass, vmSymbols::toFileURL_name(),
                               vmSymbols::toFileURL_signature(),
                               url_string, CHECK_(protection_domain));
        Handle url = Handle(THREAD, (oop)result.get_jobject());
                               location_string, CHECK_NH);
        url = Handle(THREAD, (oop)result.get_jobject());
      }

      Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
      Handle pd = get_protection_domain_from_classloader(class_loader, url,
                                                         CHECK_NH);
      mod->set_shared_protection_domain(loader_data, pd);
    }
  }

  protection_domain = Handle(THREAD, mod->shared_protection_domain());
  Handle protection_domain(THREAD, mod->shared_protection_domain());
  assert(protection_domain.not_null(), "sanity");
  return protection_domain;
}
@ -53,29 +53,29 @@ ICRefillVerifier::ICRefillVerifier()
    _refill_remembered(false)
{
  Thread* thread = Thread::current();
  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_mark(this);
  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_verifier(this);
}

ICRefillVerifier::~ICRefillVerifier() {
  assert(!_refill_requested || _refill_remembered,
         "Forgot to refill IC stubs after failed IC transition");
  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
}

ICRefillVerifierMark::ICRefillVerifierMark(ICRefillVerifier* verifier) {
  Thread* thread = Thread::current();
  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_mark(this);
  assert(thread->missed_ic_stub_refill_verifier() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_verifier(verifier);
}

ICRefillVerifierMark::~ICRefillVerifierMark() {
  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
  Thread::current()->set_missed_ic_stub_refill_verifier(NULL);
}

static ICRefillVerifier* current_ic_refill_verifier() {
  Thread* current = Thread::current();
  ICRefillVerifier* verifier = reinterpret_cast<ICRefillVerifier*>(current->missed_ic_stub_refill_mark());
  ICRefillVerifier* verifier = current->missed_ic_stub_refill_verifier();
  assert(verifier != NULL, "need a verifier for safety");
  return verifier;
}
@ -1159,6 +1159,19 @@ void nmethod::log_state_change() const {
|
||||
}
|
||||
}
|
||||
|
||||
void nmethod::unlink_from_method(bool acquire_lock) {
|
||||
// We need to check if both the _code and _from_compiled_code_entry_point
|
||||
// refer to this nmethod because there is a race in setting these two fields
|
||||
// in Method* as seen in bugid 4947125.
|
||||
// If the vep() points to the zombie nmethod, the memory for the nmethod
|
||||
// could be flushed and the compiler and vtable stubs could still call
|
||||
// through it.
|
||||
if (method() != NULL && (method()->code() == this ||
|
||||
method()->from_compiled_entry() == verified_entry_point())) {
|
||||
method()->clear_code(acquire_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Common functionality for both make_not_entrant and make_zombie
|
||||
*/
|
||||
@ -1246,17 +1259,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
|
||||
JVMCI_ONLY(maybe_invalidate_installed_code());
|
||||
|
||||
// Remove nmethod from method.
|
||||
// We need to check if both the _code and _from_compiled_code_entry_point
|
||||
// refer to this nmethod because there is a race in setting these two fields
|
||||
// in Method* as seen in bugid 4947125.
|
||||
// If the vep() points to the zombie nmethod, the memory for the nmethod
|
||||
// could be flushed and the compiler and vtable stubs could still call
|
||||
// through it.
|
||||
if (method() != NULL && (method()->code() == this ||
|
||||
method()->from_compiled_entry() == verified_entry_point())) {
|
||||
HandleMark hm;
|
||||
method()->clear_code(false /* already owns Patching_lock */);
|
||||
}
|
||||
unlink_from_method(false /* already owns Patching_lock */);
|
||||
} // leave critical region under Patching_lock
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -1283,6 +1286,13 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
|
||||
flush_dependencies(/*delete_immediately*/true);
|
||||
}
|
||||
|
||||
// Clear ICStubs to prevent back patching stubs of zombie or flushed
|
||||
// nmethods during the next safepoint (see ICStub::finalize).
|
||||
{
|
||||
CompiledICLocker ml(this);
|
||||
clear_ic_stubs();
|
||||
}
|
||||
|
||||
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
|
||||
// event and it hasn't already been reported for this nmethod then
|
||||
// report it now. The event may have been reported earlier if the GC
|
||||
@ -2533,6 +2543,7 @@ const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
|
||||
case relocInfo::section_word_type: return "section_word";
|
||||
case relocInfo::poll_type: return "poll";
|
||||
case relocInfo::poll_return_type: return "poll_return";
|
||||
case relocInfo::trampoline_stub_type: return "trampoline_stub";
|
||||
case relocInfo::type_mask: return "type_bit_mask";
|
||||
|
||||
default:
|
||||
|
@ -376,6 +376,8 @@ class nmethod : public CompiledMethod {
|
||||
|
||||
int comp_level() const { return _comp_level; }
|
||||
|
||||
void unlink_from_method(bool acquire_lock);
|
||||
|
||||
// Support for oops in scopes and relocs:
|
||||
// Note: index 0 is reserved for null.
|
||||
oop oop_at(int index) const;
|
||||
|
@ -51,9 +51,9 @@ ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecu
|
||||
}
|
||||
|
||||
|
||||
ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
|
||||
void ScopeDesc::initialize(const ScopeDesc* parent, int decode_offset) {
|
||||
_code = parent->_code;
|
||||
_decode_offset = parent->_sender_decode_offset;
|
||||
_decode_offset = decode_offset;
|
||||
_objects = parent->_objects;
|
||||
_reexecute = false; //reexecute only applies to the first scope
|
||||
_rethrow_exception = false;
|
||||
@ -61,6 +61,14 @@ ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
|
||||
decode_body();
|
||||
}
|
||||
|
||||
ScopeDesc::ScopeDesc(const ScopeDesc* parent) {
|
||||
initialize(parent, parent->_sender_decode_offset);
|
||||
}
|
||||
|
||||
ScopeDesc::ScopeDesc(const ScopeDesc* parent, int decode_offset) {
|
||||
initialize(parent, decode_offset);
|
||||
}
|
||||
|
||||
|
||||
void ScopeDesc::decode_body() {
|
||||
if (decode_offset() == DebugInformationRecorder::serialized_null) {
|
||||
|
@ -67,6 +67,9 @@ class ScopeDesc : public ResourceObj {
|
||||
// avoid a .hpp-.hpp dependency.)
|
||||
ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
|
||||
|
||||
// Direct access to scope
|
||||
ScopeDesc* at_offset(int decode_offset) { return new ScopeDesc(this, decode_offset); }
|
||||
|
||||
// JVM state
|
||||
Method* method() const { return _method; }
|
||||
int bci() const { return _bci; }
|
||||
@ -85,12 +88,16 @@ class ScopeDesc : public ResourceObj {
|
||||
// Returns where the scope was decoded
|
||||
int decode_offset() const { return _decode_offset; }
|
||||
|
||||
int sender_decode_offset() const { return _sender_decode_offset; }
|
||||
|
||||
// Tells whether sender() returns NULL
|
||||
bool is_top() const;
|
||||
|
||||
private:
|
||||
// Alternative constructor
|
||||
void initialize(const ScopeDesc* parent, int decode_offset);
|
||||
// Alternative constructors
|
||||
ScopeDesc(const ScopeDesc* parent);
|
||||
ScopeDesc(const ScopeDesc* parent, int decode_offset);
|
||||
|
||||
// JVM state
|
||||
Method* _method;
|
||||
|
@ -97,7 +97,7 @@ inline void G1ArchiveAllocator::enable_archive_object_check() {
|
||||
}
|
||||
|
||||
_archive_check_enabled = true;
|
||||
size_t length = Universe::heap()->max_capacity();
|
||||
size_t length = G1CollectedHeap::heap()->max_reserved_capacity();
|
||||
_closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
|
||||
(HeapWord*)Universe::heap()->base() + length,
|
||||
HeapRegion::GrainBytes);
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1HeapVerifier.hpp"
|
||||
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "gc/shared/gcArguments.inline.hpp"
|
||||
#include "gc/shared/workerPolicy.hpp"
|
||||
@ -156,5 +157,9 @@ void G1Arguments::initialize() {
|
||||
}
|
||||
|
||||
CollectedHeap* G1Arguments::create_heap() {
|
||||
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
|
||||
if (AllocateOldGenAt != NULL) {
|
||||
return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
|
||||
} else {
|
||||
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
|
||||
}
|
||||
}
|
||||
|
@ -63,7 +63,7 @@ G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
|
||||
}
|
||||
|
||||
void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
|
||||
assert(_g1h->max_capacity() > 0, "initialization order");
|
||||
assert(_g1h->max_reserved_capacity() > 0, "initialization order");
|
||||
assert(_g1h->capacity() == 0, "initialization order");
|
||||
|
||||
if (G1ConcRSHotCardLimit > 0) {
|
||||
|
@ -161,12 +161,12 @@ HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
|
||||
|
||||
// Private methods.
|
||||
|
||||
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
|
||||
HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
|
||||
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
|
||||
"the only time we use this to allocate a humongous region is "
|
||||
"when we are allocating a single humongous region");
|
||||
|
||||
HeapRegion* res = _hrm.allocate_free_region(is_old);
|
||||
HeapRegion* res = _hrm->allocate_free_region(type);
|
||||
|
||||
if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
|
||||
// Currently, only attempts to allocate GC alloc regions set
|
||||
@ -183,7 +183,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
|
||||
// always expand the heap by an amount aligned to the heap
|
||||
// region size, the free list should in theory not be empty.
|
||||
// In either case allocate_free_region() will check for NULL.
|
||||
res = _hrm.allocate_free_region(is_old);
|
||||
res = _hrm->allocate_free_region(type);
|
||||
} else {
|
||||
_expand_heap_after_alloc_failure = false;
|
||||
}
|
||||
@ -330,16 +330,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
// Only one region to allocate, try to use a fast path by directly allocating
|
||||
// from the free lists. Do not try to expand here, we will potentially do that
|
||||
// later.
|
||||
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
|
||||
HeapRegion* hr = new_region(word_size, HeapRegionType::Humongous, false /* do_expand */);
|
||||
if (hr != NULL) {
|
||||
first = hr->hrm_index();
|
||||
}
|
||||
} else {
|
||||
// Policy: Try only empty regions (i.e. already committed first). Maybe we
|
||||
// are lucky enough to find some.
|
||||
first = _hrm.find_contiguous_only_empty(obj_regions);
|
||||
first = _hrm->find_contiguous_only_empty(obj_regions);
|
||||
if (first != G1_NO_HRM_INDEX) {
|
||||
_hrm.allocate_free_regions_starting_at(first, obj_regions);
|
||||
_hrm->allocate_free_regions_starting_at(first, obj_regions);
|
||||
}
|
||||
}
|
||||
|
||||
@ -347,14 +347,14 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
// Policy: We could not find enough regions for the humongous object in the
|
||||
// free list. Look through the heap to find a mix of free and uncommitted regions.
|
||||
// If so, try expansion.
|
||||
first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
|
||||
first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
|
||||
if (first != G1_NO_HRM_INDEX) {
|
||||
// We found something. Make sure these regions are committed, i.e. expand
|
||||
// the heap. Alternatively we could do a defragmentation GC.
|
||||
log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
|
||||
word_size * HeapWordSize);
|
||||
|
||||
_hrm.expand_at(first, obj_regions, workers());
|
||||
_hrm->expand_at(first, obj_regions, workers());
|
||||
g1_policy()->record_new_heap_size(num_regions());
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -365,7 +365,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
assert(is_on_master_free_list(hr), "sanity");
|
||||
}
|
||||
#endif
|
||||
_hrm.allocate_free_regions_starting_at(first, obj_regions);
|
||||
_hrm->allocate_free_regions_starting_at(first, obj_regions);
|
||||
} else {
|
||||
// Policy: Potentially trigger a defragmentation GC.
|
||||
}
|
||||
@ -554,7 +554,7 @@ void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
MemRegion reserved = _hrm->reserved();
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
|
||||
return false;
|
||||
@ -571,7 +571,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MutexLockerEx x(Heap_lock);
|
||||
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
MemRegion reserved = _hrm->reserved();
|
||||
HeapWord* prev_last_addr = NULL;
|
||||
HeapRegion* prev_last_region = NULL;
|
||||
|
||||
@ -605,7 +605,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
|
||||
// range ended, and adjust the start address so we don't try to allocate
|
||||
// the same region again. If the current range is entirely within that
|
||||
// region, skip it, just adjusting the recorded top.
|
||||
HeapRegion* start_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* start_region = _hrm->addr_to_region(start_address);
|
||||
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
|
||||
start_address = start_region->end();
|
||||
if (start_address > last_address) {
|
||||
@ -615,12 +615,12 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
|
||||
}
|
||||
start_region->set_top(start_address);
|
||||
curr_range = MemRegion(start_address, last_address + 1);
|
||||
start_region = _hrm.addr_to_region(start_address);
|
||||
start_region = _hrm->addr_to_region(start_address);
|
||||
}
|
||||
|
||||
// Perform the actual region allocation, exiting if it fails.
|
||||
// Then note how much new space we have allocated.
|
||||
if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
|
||||
if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
|
||||
return false;
|
||||
}
|
||||
increase_used(word_size * HeapWordSize);
|
||||
@ -632,8 +632,8 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
|
||||
|
||||
// Mark each G1 region touched by the range as archive, add it to
|
||||
// the old set, and set top.
|
||||
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm.addr_to_region(last_address);
|
||||
HeapRegion* curr_region = _hrm->addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm->addr_to_region(last_address);
|
||||
prev_last_region = last_region;
|
||||
|
||||
while (curr_region != NULL) {
|
||||
@ -650,7 +650,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
|
||||
HeapRegion* next_region;
|
||||
if (curr_region != last_region) {
|
||||
top = curr_region->end();
|
||||
next_region = _hrm.next_region_in_heap(curr_region);
|
||||
next_region = _hrm->next_region_in_heap(curr_region);
|
||||
} else {
|
||||
top = last_address + 1;
|
||||
next_region = NULL;
|
||||
@ -671,7 +671,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
|
||||
assert(!is_init_completed(), "Expect to be called at JVM init time");
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
MemRegion reserved = _hrm->reserved();
|
||||
HeapWord *prev_last_addr = NULL;
|
||||
HeapRegion* prev_last_region = NULL;
|
||||
|
||||
@ -691,8 +691,8 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
|
||||
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
|
||||
p2i(start_address), p2i(prev_last_addr));
|
||||
|
||||
HeapRegion* start_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm.addr_to_region(last_address);
|
||||
HeapRegion* start_region = _hrm->addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm->addr_to_region(last_address);
|
||||
HeapWord* bottom_address = start_region->bottom();
|
||||
|
||||
// Check for a range beginning in the same region in which the
|
||||
@ -708,7 +708,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
|
||||
guarantee(curr_region->is_archive(),
|
||||
"Expected archive region at index %u", curr_region->hrm_index());
|
||||
if (curr_region != last_region) {
|
||||
curr_region = _hrm.next_region_in_heap(curr_region);
|
||||
curr_region = _hrm->next_region_in_heap(curr_region);
|
||||
} else {
|
||||
curr_region = NULL;
|
||||
}
|
||||
@ -757,7 +757,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
|
||||
assert(!is_init_completed(), "Expect to be called at JVM init time");
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
MemRegion reserved = _hrm->reserved();
|
||||
HeapWord* prev_last_addr = NULL;
|
||||
HeapRegion* prev_last_region = NULL;
|
||||
size_t size_used = 0;
|
||||
@ -779,8 +779,8 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
|
||||
size_used += ranges[i].byte_size();
|
||||
prev_last_addr = last_address;
|
||||
|
||||
HeapRegion* start_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm.addr_to_region(last_address);
|
||||
HeapRegion* start_region = _hrm->addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm->addr_to_region(last_address);
|
||||
|
||||
// Check for ranges that start in the same G1 region in which the previous
|
||||
// range ended, and adjust the start address so we don't try to free
|
||||
@ -791,7 +791,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
|
||||
if (start_address > last_address) {
|
||||
continue;
|
||||
}
|
||||
start_region = _hrm.addr_to_region(start_address);
|
||||
start_region = _hrm->addr_to_region(start_address);
|
||||
}
|
||||
prev_last_region = last_region;
|
||||
|
||||
@ -806,11 +806,11 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
|
||||
curr_region->set_free();
|
||||
curr_region->set_top(curr_region->bottom());
|
||||
if (curr_region != last_region) {
|
||||
curr_region = _hrm.next_region_in_heap(curr_region);
|
||||
curr_region = _hrm->next_region_in_heap(curr_region);
|
||||
} else {
|
||||
curr_region = NULL;
|
||||
}
|
||||
_hrm.shrink_at(curr_index, 1);
|
||||
_hrm->shrink_at(curr_index, 1);
|
||||
uncommitted_regions++;
|
||||
}
|
||||
|
||||
@ -1024,6 +1024,8 @@ void G1CollectedHeap::prepare_heap_for_full_collection() {
|
||||
abandon_collection_set(collection_set());
|
||||
|
||||
tear_down_region_sets(false /* free_list_only */);
|
||||
|
||||
hrm()->prepare_for_full_collection_start();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
|
||||
@ -1035,6 +1037,8 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::prepare_heap_for_mutators() {
|
||||
hrm()->prepare_for_full_collection_end();
|
||||
|
||||
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
|
||||
ClassLoaderDataGraph::purge();
|
||||
MetaspaceUtils::verify_metrics();
|
||||
@ -1071,7 +1075,7 @@ void G1CollectedHeap::abort_refinement() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::verify_after_full_collection() {
|
||||
_hrm.verify_optional();
|
||||
_hrm->verify_optional();
|
||||
_verifier->verify_region_sets_optional();
|
||||
_verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
|
||||
// Clear the previous marking bitmap, if needed for bitmap verification.
|
||||
@ -1325,7 +1329,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
|
||||
|
||||
|
||||
if (expand(expand_bytes, _workers)) {
|
||||
_hrm.verify_optional();
|
||||
_hrm->verify_optional();
|
||||
_verifier->verify_region_sets_optional();
|
||||
return attempt_allocation_at_safepoint(word_size,
|
||||
false /* expect_null_mutator_alloc_region */);
|
||||
@ -1350,7 +1354,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
|
||||
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
|
||||
assert(regions_to_expand > 0, "Must expand by at least one region");
|
||||
|
||||
uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
|
||||
uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
|
||||
if (expand_time_ms != NULL) {
|
||||
*expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
|
||||
}
|
||||
@ -1365,7 +1369,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, do
|
||||
// The expansion of the virtual storage space was unsuccessful.
|
||||
// Let's see if it was because we ran out of swap.
|
||||
if (G1ExitOnExpansionFailure &&
|
||||
_hrm.available() >= regions_to_expand) {
|
||||
_hrm->available() >= regions_to_expand) {
|
||||
// We had head room...
|
||||
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
|
||||
}
|
||||
@ -1380,7 +1384,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
|
||||
HeapRegion::GrainBytes);
|
||||
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
|
||||
|
||||
uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
|
||||
uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
|
||||
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
|
||||
|
||||
|
||||
@ -1408,7 +1412,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
|
||||
shrink_helper(shrink_bytes);
|
||||
rebuild_region_sets(true /* free_list_only */);
|
||||
|
||||
_hrm.verify_optional();
|
||||
_hrm->verify_optional();
|
||||
_verifier->verify_region_sets_optional();
|
||||
}
|
||||
|
||||
@ -1486,7 +1490,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
|
||||
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
|
||||
_bot(NULL),
|
||||
_listener(),
|
||||
_hrm(),
|
||||
_hrm(NULL),
|
||||
_allocator(NULL),
|
||||
_verifier(NULL),
|
||||
_summary_bytes_used(0),
|
||||
@ -1505,7 +1509,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
|
||||
_survivor(),
|
||||
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
|
||||
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
|
||||
_g1_policy(new G1Policy(_gc_timer_stw)),
|
||||
_g1_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
|
||||
_heap_sizing_policy(NULL),
|
||||
_collection_set(this, _g1_policy),
|
||||
_hot_card_cache(NULL),
|
||||
@ -1632,7 +1636,7 @@ jint G1CollectedHeap::initialize() {
|
||||
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
|
||||
|
||||
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
|
||||
size_t max_byte_size = collector_policy()->max_heap_byte_size();
|
||||
size_t max_byte_size = g1_collector_policy()->heap_reserved_size_bytes();
|
||||
size_t heap_alignment = collector_policy()->heap_alignment();
|
||||
|
||||
// Ensure that the sizes are properly aligned.
|
||||
@ -1692,12 +1696,17 @@ jint G1CollectedHeap::initialize() {
|
||||
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
|
||||
size_t page_size = actual_reserved_page_size(heap_rs);
|
||||
G1RegionToSpaceMapper* heap_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(g1_rs,
|
||||
g1_rs.size(),
|
||||
page_size,
|
||||
HeapRegion::GrainBytes,
|
||||
1,
|
||||
mtJavaHeap);
|
||||
G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
|
||||
g1_rs.size(),
|
||||
page_size,
|
||||
HeapRegion::GrainBytes,
|
||||
1,
|
||||
mtJavaHeap);
|
||||
if(heap_storage == NULL) {
|
||||
vm_shutdown_during_initialization("Could not initialize G1 heap");
|
||||
return JNI_ERR;
|
||||
}
|
||||
|
||||
os::trace_page_sizes("Heap",
|
||||
collector_policy()->min_heap_byte_size(),
|
||||
max_byte_size,
|
||||
@ -1728,7 +1737,9 @@ jint G1CollectedHeap::initialize() {
|
||||
G1RegionToSpaceMapper* next_bitmap_storage =
|
||||
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
|
||||
|
||||
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
||||
_hrm = HeapRegionManager::create_manager(this, g1_collector_policy());
|
||||
|
||||
_hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
||||
_card_table->initialize(cardtable_storage);
|
||||
// Do later initialization work for concurrent refinement.
|
||||
_hot_card_cache->initialize(card_counts_storage);
|
||||
@ -1743,20 +1754,20 @@ jint G1CollectedHeap::initialize() {
|
||||
guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
|
||||
// Also create a G1 rem set.
|
||||
_g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
|
||||
_g1_rem_set->initialize(max_capacity(), max_regions());
|
||||
_g1_rem_set->initialize(max_reserved_capacity(), max_regions());
|
||||
|
||||
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
|
||||
guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
|
||||
guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
|
||||
"too many cards per region");
|
||||
|
||||
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
|
||||
FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
|
||||
|
||||
_bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
|
||||
|
||||
{
|
||||
HeapWord* start = _hrm.reserved().start();
|
||||
HeapWord* end = _hrm.reserved().end();
|
||||
HeapWord* start = _hrm->reserved().start();
|
||||
HeapWord* end = _hrm->reserved().end();
|
||||
size_t granularity = HeapRegion::GrainBytes;
|
||||
|
||||
_in_cset_fast_test.initialize(start, end, granularity);
|
||||
@ -1807,7 +1818,7 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
// Here we allocate the dummy HeapRegion that is required by the
|
||||
// G1AllocRegion class.
|
||||
HeapRegion* dummy_region = _hrm.get_dummy_region();
|
||||
HeapRegion* dummy_region = _hrm->get_dummy_region();
|
||||
|
||||
// We'll re-use the same region whether the alloc region will
|
||||
// require BOT updates or not and, if it doesn't, then a non-young
|
||||
@ -1927,16 +1938,20 @@ CollectorPolicy* G1CollectedHeap::collector_policy() const {
|
||||
return _collector_policy;
|
||||
}
|
||||
|
||||
G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
|
||||
return _collector_policy;
|
||||
}
|
||||
|
||||
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
|
||||
return &_soft_ref_policy;
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::capacity() const {
|
||||
return _hrm.length() * HeapRegion::GrainBytes;
|
||||
return _hrm->length() * HeapRegion::GrainBytes;
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
|
||||
return _hrm.total_free_bytes();
|
||||
return _hrm->total_free_bytes();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
|
||||
@ -2002,6 +2017,18 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
|
||||
}
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
|
||||
if(g1_policy()->force_upgrade_to_full()) {
|
||||
return true;
|
||||
} else if (should_do_concurrent_full_gc(_gc_cause)) {
|
||||
return false;
|
||||
} else if (has_regions_left_for_allocation()) {
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void G1CollectedHeap::allocate_dummy_regions() {
|
||||
// Let's fill up most of the region
|
||||
@ -2152,7 +2179,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::is_in(const void* p) const {
|
||||
if (_hrm.reserved().contains(p)) {
|
||||
if (_hrm->reserved().contains(p)) {
|
||||
// Given that we know that p is in the reserved space,
|
||||
// heap_region_containing() should successfully
|
||||
// return the containing region.
|
||||
@ -2166,7 +2193,7 @@ bool G1CollectedHeap::is_in(const void* p) const {
|
||||
#ifdef ASSERT
|
||||
bool G1CollectedHeap::is_in_exact(const void* p) const {
|
||||
bool contains = reserved_region().contains(p);
|
||||
bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
|
||||
bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
|
||||
if (contains && available) {
|
||||
return true;
|
||||
} else {
|
||||
@ -2197,18 +2224,18 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
|
||||
_hrm.iterate(cl);
|
||||
_hrm->iterate(cl);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
|
||||
HeapRegionClaimer *hrclaimer,
|
||||
uint worker_id) const {
|
||||
_hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
|
||||
_hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
|
||||
}
|
||||
|
||||
void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
|
||||
HeapRegionClaimer *hrclaimer) const {
|
||||
_hrm.par_iterate(cl, hrclaimer, 0);
|
||||
_hrm->par_iterate(cl, hrclaimer, 0);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
|
||||
@ -2257,7 +2284,11 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::max_capacity() const {
|
||||
return _hrm.reserved().byte_size();
|
||||
return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::max_reserved_capacity() const {
|
||||
return _hrm->max_length() * HeapRegion::GrainBytes;
|
||||
}
|
||||
|
||||
jlong G1CollectedHeap::millis_since_last_gc() {
|
||||
@ -2347,8 +2378,8 @@ void G1CollectedHeap::print_on(outputStream* st) const {
|
||||
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
|
||||
capacity()/K, used_unlocked()/K);
|
||||
st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
|
||||
p2i(_hrm.reserved().start()),
|
||||
p2i(_hrm.reserved().end()));
|
||||
p2i(_hrm->reserved().start()),
|
||||
p2i(_hrm->reserved().end()));
|
||||
st->cr();
|
||||
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
|
||||
uint young_regions = young_regions_count();
|
||||
@ -3131,7 +3162,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
// output from the concurrent mark thread interfering with this
|
||||
// logging output either.
|
||||
|
||||
_hrm.verify_optional();
|
||||
_hrm->verify_optional();
|
||||
_verifier->verify_region_sets_optional();
|
||||
|
||||
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
|
||||
@ -3947,7 +3978,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
|
||||
bool locked) {
|
||||
assert(!hr->is_free(), "the region should not be free");
|
||||
assert(!hr->is_empty(), "the region should not be empty");
|
||||
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
|
||||
assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
|
||||
assert(free_list != NULL, "pre-condition");
|
||||
|
||||
if (G1VerifyBitmaps) {
|
||||
@ -3988,7 +4019,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
|
||||
assert(list != NULL, "list can't be null");
|
||||
if (!list->is_empty()) {
|
||||
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
_hrm.insert_list_into_free_list(list);
|
||||
_hrm->insert_list_into_free_list(list);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4521,7 +4552,7 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
|
||||
// this is that during a full GC string deduplication needs to know if
|
||||
// a collected region was young or old when the full GC was initiated.
|
||||
}
|
||||
_hrm.remove_all_free_regions();
|
||||
_hrm->remove_all_free_regions();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::increase_used(size_t bytes) {
|
||||
@ -4596,7 +4627,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
|
||||
_survivor.clear();
|
||||
}
|
||||
|
||||
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
|
||||
RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
|
||||
heap_region_iterate(&cl);
|
||||
|
||||
if (!free_list_only) {
|
||||
@ -4623,7 +4654,7 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
|
||||
bool should_allocate = g1_policy()->should_allocate_mutator_region();
|
||||
if (force || should_allocate) {
|
||||
HeapRegion* new_alloc_region = new_region(word_size,
|
||||
false /* is_old */,
|
||||
HeapRegionType::Eden,
|
||||
false /* do_expand */);
|
||||
if (new_alloc_region != NULL) {
|
||||
set_region_short_lived_locked(new_alloc_region);
|
||||
@ -4667,13 +4698,19 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const bool is_survivor = dest.is_young();
|
||||
HeapRegionType type;
|
||||
if (dest.is_young()) {
|
||||
type = HeapRegionType::Survivor;
|
||||
} else {
|
||||
type = HeapRegionType::Old;
|
||||
}
|
||||
|
||||
HeapRegion* new_alloc_region = new_region(word_size,
|
||||
!is_survivor,
|
||||
type,
|
||||
true /* do_expand */);
|
||||
|
||||
if (new_alloc_region != NULL) {
|
||||
if (is_survivor) {
|
||||
if (type.is_survivor()) {
|
||||
new_alloc_region->set_survivor();
|
||||
_survivor.add(new_alloc_region);
|
||||
_verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
|
||||
@ -4705,14 +4742,14 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
|
||||
|
||||
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
|
||||
bool expanded = false;
|
||||
uint index = _hrm.find_highest_free(&expanded);
|
||||
uint index = _hrm->find_highest_free(&expanded);
|
||||
|
||||
if (index != G1_NO_HRM_INDEX) {
|
||||
if (expanded) {
|
||||
log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
|
||||
HeapRegion::GrainWords * HeapWordSize);
|
||||
}
|
||||
_hrm.allocate_free_regions_starting_at(index, 1);
|
||||
_hrm->allocate_free_regions_starting_at(index, 1);
|
||||
return region_at(index);
|
||||
}
|
||||
return NULL;
|
||||
|
@ -45,6 +45,7 @@
|
||||
#include "gc/g1/g1YCTypes.hpp"
|
||||
#include "gc/g1/heapRegionManager.hpp"
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
|
||||
#include "gc/shared/barrierSet.hpp"
|
||||
#include "gc/shared/collectedHeap.hpp"
|
||||
#include "gc/shared/gcHeapSummary.hpp"
|
||||
@ -194,7 +195,7 @@ private:
|
||||
G1RegionMappingChangedListener _listener;
|
||||
|
||||
// The sequence of all heap regions in the heap.
|
||||
HeapRegionManager _hrm;
|
||||
HeapRegionManager* _hrm;
|
||||
|
||||
// Manages all allocations with regions except humongous object allocations.
|
||||
G1Allocator* _allocator;
|
||||
@ -267,6 +268,9 @@ private:
|
||||
// (e) cause == _wb_conc_mark
|
||||
bool should_do_concurrent_full_gc(GCCause::Cause cause);
|
||||
|
||||
// Return true if should upgrade to full gc after an incremental one.
|
||||
bool should_upgrade_to_full_gc(GCCause::Cause cause);
|
||||
|
||||
// indicates whether we are in young or mixed GC mode
|
||||
G1CollectorState _collector_state;
|
||||
|
||||
@ -369,9 +373,9 @@ private:
|
||||
// Try to allocate a single non-humongous HeapRegion sufficient for
|
||||
// an allocation of the given word_size. If do_expand is true,
|
||||
// attempt to expand the heap if necessary to satisfy the allocation
|
||||
// request. If the region is to be used as an old region or for a
|
||||
// humongous object, set is_old to true. If not, to false.
|
||||
HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
|
||||
// request. 'type' takes the type of region to be allocated. (Use constants
|
||||
// Old, Eden, Humongous, Survivor defined in HeapRegionType.)
|
||||
HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
|
||||
|
||||
// Initialize a contiguous set of free regions of length num_regions
|
||||
// and starting at index first so that they appear as a single
|
||||
@ -957,10 +961,13 @@ public:
|
||||
// The current policy object for the collector.
|
||||
G1Policy* g1_policy() const { return _g1_policy; }
|
||||
|
||||
HeapRegionManager* hrm() const { return _hrm; }
|
||||
|
||||
const G1CollectionSet* collection_set() const { return &_collection_set; }
|
||||
G1CollectionSet* collection_set() { return &_collection_set; }
|
||||
|
||||
virtual CollectorPolicy* collector_policy() const;
|
||||
virtual G1CollectorPolicy* g1_collector_policy() const;
|
||||
|
||||
virtual SoftRefPolicy* soft_ref_policy();
|
||||
|
||||
@ -1009,7 +1016,7 @@ public:
|
||||
// But G1CollectedHeap doesn't yet support this.
|
||||
|
||||
virtual bool is_maximal_no_gc() const {
|
||||
return _hrm.available() == 0;
|
||||
return _hrm->available() == 0;
|
||||
}
|
||||
|
||||
// Returns whether there are any regions left in the heap for allocation.
|
||||
@ -1018,19 +1025,22 @@ public:
|
||||
}
|
||||
|
||||
// The current number of regions in the heap.
|
||||
uint num_regions() const { return _hrm.length(); }
|
||||
uint num_regions() const { return _hrm->length(); }
|
||||
|
||||
// The max number of regions in the heap.
|
||||
uint max_regions() const { return _hrm.max_length(); }
|
||||
uint max_regions() const { return _hrm->max_length(); }
|
||||
|
||||
// Max number of regions that can be committed.
|
||||
uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
|
||||
|
||||
// The number of regions that are completely free.
|
||||
uint num_free_regions() const { return _hrm.num_free_regions(); }
|
||||
uint num_free_regions() const { return _hrm->num_free_regions(); }
|
||||
|
||||
// The number of regions that can be allocated into.
|
||||
uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
|
||||
uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
|
||||
|
||||
MemoryUsage get_auxiliary_data_memory_usage() const {
|
||||
return _hrm.get_auxiliary_data_memory_usage();
|
||||
return _hrm->get_auxiliary_data_memory_usage();
|
||||
}
|
||||
|
||||
// The number of regions that are not completely free.
|
||||
@ -1038,7 +1048,7 @@ public:
|
||||
|
||||
#ifdef ASSERT
|
||||
bool is_on_master_free_list(HeapRegion* hr) {
|
||||
return _hrm.is_free(hr);
|
||||
return _hrm->is_free(hr);
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
@ -1095,13 +1105,13 @@ public:
|
||||
// Return "TRUE" iff the given object address is in the reserved
|
||||
// region of g1.
|
||||
bool is_in_g1_reserved(const void* p) const {
|
||||
return _hrm.reserved().contains(p);
|
||||
return _hrm->reserved().contains(p);
|
||||
}
|
||||
|
||||
// Returns a MemRegion that corresponds to the space that has been
|
||||
// reserved for the heap
|
||||
MemRegion g1_reserved() const {
|
||||
return _hrm.reserved();
|
||||
return _hrm->reserved();
|
||||
}
|
||||
|
||||
virtual bool is_in_closed_subset(const void* p) const;
|
||||
@ -1227,6 +1237,9 @@ public:
|
||||
// Print the maximum heap capacity.
|
||||
virtual size_t max_capacity() const;
|
||||
|
||||
// Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
|
||||
virtual size_t max_reserved_capacity() const;
|
||||
|
||||
virtual jlong millis_since_last_gc();
|
||||
|
||||
|
||||
|
@ -57,13 +57,13 @@ size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
|
||||
// Inline functions for G1CollectedHeap
|
||||
|
||||
// Return the region with the given index. It assumes the index is valid.
|
||||
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
|
||||
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
|
||||
|
||||
// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
|
||||
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
|
||||
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }
|
||||
|
||||
inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
|
||||
return _hrm.next_region_in_humongous(hr);
|
||||
return _hrm->next_region_in_humongous(hr);
|
||||
}
|
||||
|
||||
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
|
||||
@ -74,7 +74,7 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
|
||||
return _hrm.reserved().start() + index * HeapRegion::GrainWords;
|
||||
return _hrm->reserved().start() + index * HeapRegion::GrainWords;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
@ -83,7 +83,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
|
||||
assert(is_in_g1_reserved((const void*) addr),
|
||||
"Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
|
||||
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
|
||||
return _hrm.addr_to_region((HeapWord*) addr);
|
||||
return _hrm->addr_to_region((HeapWord*) addr);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
@ -266,12 +266,12 @@ inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
|
||||
}
|
||||
|
||||
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
|
||||
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
|
||||
assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
|
||||
_humongous_reclaim_candidates.set_candidate(region, value);
|
||||
}
|
||||
|
||||
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
|
||||
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
|
||||
assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
|
||||
return _humongous_reclaim_candidates.is_candidate(region);
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -55,3 +55,11 @@ void G1CollectorPolicy::initialize_alignments() {
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}

size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
  return _max_heap_byte_size;
}

bool G1CollectorPolicy::is_hetero_heap() const {
  return false;
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,7 @@ protected:

public:
  G1CollectorPolicy();
  virtual size_t heap_reserved_size_bytes() const;
  virtual bool is_hetero_heap() const;
};

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
@ -603,14 +603,14 @@ void G1HeapVerifier::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _g1h->_hrm.verify();
  _g1h->_hrm->verify();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
  _g1h->heap_region_iterate(&cl);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
}

void G1HeapVerifier::prepare_for_verify() {
@ -851,7 +851,7 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {

bool G1HeapVerifier::check_cset_fast_test() {
  G1CheckCSetFastTableClosure cl;
  _g1h->_hrm.iterate(&cl);
  _g1h->_hrm->iterate(&cl);
  return !cl.failures();
}
#endif // PRODUCT
103
src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.cpp
Normal file
@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
|
||||
const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
|
||||
size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
|
||||
|
||||
static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
|
||||
julong phys_mem;
|
||||
// If MaxRam is specified, we use that as maximum physical memory available.
|
||||
if (FLAG_IS_DEFAULT(MaxRAM)) {
|
||||
phys_mem = os::physical_memory();
|
||||
calc_str.append("Physical_Memory");
|
||||
} else {
|
||||
phys_mem = (julong)MaxRAM;
|
||||
calc_str.append("MaxRAM");
|
||||
}
|
||||
|
||||
julong reasonable_max = phys_mem;
|
||||
|
||||
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
|
||||
// reasonable max size of young generation.
|
||||
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
|
||||
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
|
||||
calc_str.append(" / MaxRAMFraction");
|
||||
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
|
||||
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
|
||||
calc_str.append(" * MaxRAMPercentage / 100");
|
||||
} else {
|
||||
// We use our own fraction to calculate max size of young generation.
|
||||
reasonable_max = phys_mem * max_ram_fraction_for_young;
|
||||
calc_str.append(" * %0.2f", max_ram_fraction_for_young);
|
||||
}
|
||||
|
||||
return (size_t)reasonable_max;
|
||||
}
|
||||
|
||||
void G1HeterogeneousCollectorPolicy::initialize_flags() {
|
||||
|
||||
FormatBuffer<100> calc_str("");
|
||||
|
||||
MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
|
||||
|
||||
if (MaxNewSize > MaxMemoryForYoung) {
|
||||
if (FLAG_IS_CMDLINE(MaxNewSize)) {
|
||||
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
|
||||
MaxMemoryForYoung, calc_str.buffer());
|
||||
} else {
|
||||
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
|
||||
"Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
|
||||
}
|
||||
MaxNewSize = MaxMemoryForYoung;
|
||||
}
|
||||
if (NewSize > MaxMemoryForYoung) {
|
||||
if (FLAG_IS_CMDLINE(NewSize)) {
|
||||
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
|
||||
MaxMemoryForYoung, calc_str.buffer());
|
||||
}
|
||||
NewSize = MaxMemoryForYoung;
|
||||
}
|
||||
|
||||
// After setting new size flags, call base class initialize_flags()
|
||||
G1CollectorPolicy::initialize_flags();
|
||||
}
|
||||
|
||||
size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
|
||||
return MaxMemoryForYoung;
|
||||
}
|
||||
|
||||
size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
|
||||
return 2 * _max_heap_byte_size;
|
||||
}
|
||||
|
||||
bool G1HeterogeneousCollectorPolicy::is_hetero_heap() const {
|
||||
return true;
|
||||
}
|
48
src/hotspot/share/gc/g1/g1HeterogeneousCollectorPolicy.hpp
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
|
||||
#define SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
|
||||
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
|
||||
|
||||
class G1HeterogeneousCollectorPolicy : public G1CollectorPolicy {
|
||||
private:
|
||||
// Max fraction of dram to use for young generation when MaxRAMFraction and
|
||||
// MaxRAMPercentage are not specified on commandline.
|
||||
static const double MaxRamFractionForYoung;
|
||||
static size_t MaxMemoryForYoung;
|
||||
|
||||
protected:
|
||||
virtual void initialize_flags();
|
||||
|
||||
public:
|
||||
G1HeterogeneousCollectorPolicy() {}
|
||||
virtual size_t heap_reserved_size_bytes() const;
|
||||
virtual bool is_hetero_heap() const;
|
||||
static size_t reasonable_max_memory_for_young();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSCOLLECTORPOLICY_HPP
|
58
src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.cpp
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
|
||||
|
||||
G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
|
||||
G1Policy(policy, gc_timer), _manager(NULL) {}
|
||||
|
||||
// We call the super class init(), after which we provision young_list_target_length() regions in dram.
|
||||
void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
|
||||
G1Policy::init(g1h, collection_set);
|
||||
_manager = HeterogeneousHeapRegionManager::manager();
|
||||
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
|
||||
}
|
||||
|
||||
// After a collection pause, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
|
||||
void G1HeterogeneousHeapPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
|
||||
G1Policy::record_collection_pause_end(pause_time_ms, cards_scanned, heap_used_bytes_before_gc);
|
||||
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
|
||||
}
|
||||
|
||||
// After a full collection, young list target length is updated. So we need to make sure we have enough regions in dram for young gen.
|
||||
void G1HeterogeneousHeapPolicy::record_full_collection_end() {
|
||||
G1Policy::record_full_collection_end();
|
||||
_manager->adjust_dram_regions((uint)young_list_target_length(), G1CollectedHeap::heap()->workers());
|
||||
}
|
||||
|
||||
bool G1HeterogeneousHeapPolicy::force_upgrade_to_full() {
|
||||
if (_manager->has_borrowed_regions()) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
48
src/hotspot/share/gc/g1/g1HeterogeneousHeapPolicy.hpp
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
|
||||
#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
|
||||
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
|
||||
|
||||
class G1HeterogeneousHeapPolicy : public G1Policy {
|
||||
// Stash a pointer to the hrm.
|
||||
HeterogeneousHeapRegionManager* _manager;
|
||||
|
||||
public:
|
||||
G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
|
||||
|
||||
// initialize policy
|
||||
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
|
||||
// Record end of an evacuation pause.
|
||||
virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
|
||||
// Record the end of full collection.
|
||||
virtual void record_full_collection_end();
|
||||
|
||||
virtual bool force_upgrade_to_full();
|
||||
};
|
||||
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
|
51
src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.cpp
Normal file
@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
|
||||
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
|
||||
G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
|
||||
// will be used later when min and max young size is calculated.
|
||||
_max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
|
||||
}
|
||||
|
||||
// Since heap is sized potentially to larger value accounting for dram + nvdimm, we need to limit
|
||||
// max young gen size to the available dram.
|
||||
// Call parent class method first and then adjust sizes based on available dram
|
||||
void G1HeterogeneousHeapYoungGenSizer::adjust_max_new_size(uint number_of_heap_regions) {
|
||||
G1YoungGenSizer::adjust_max_new_size(number_of_heap_regions);
|
||||
adjust_lengths_based_on_dram_memory();
|
||||
}
|
||||
|
||||
void G1HeterogeneousHeapYoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
|
||||
G1YoungGenSizer::heap_size_changed(new_number_of_heap_regions);
|
||||
adjust_lengths_based_on_dram_memory();
|
||||
}
|
||||
|
||||
void G1HeterogeneousHeapYoungGenSizer::adjust_lengths_based_on_dram_memory() {
|
||||
_min_desired_young_length = MIN2(_min_desired_young_length, _max_young_length);
|
||||
_max_desired_young_length = MIN2(_max_desired_young_length, _max_young_length);
|
||||
}
|
51
src/hotspot/share/gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp
Normal file
@ -0,0 +1,51 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
|
||||
#define SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
|
||||
|
||||
#include "gc/g1/g1YoungGenSizer.hpp"
|
||||
|
||||
// This class prevents the size of young generation of G1 heap to exceed dram
|
||||
// memory available. If set on command line, MaxRAM and MaxRAMFraction/MaxRAMPercentage
|
||||
// are used to determine the maximum size that young generation can grow.
|
||||
// Else we set the maximum size to 80% of dram available in the system.
|
||||
|
||||
class G1HeterogeneousHeapYoungGenSizer : public G1YoungGenSizer {
|
||||
private:
|
||||
// maximum no of regions that young generation can grow to. Calculated in constructor.
|
||||
uint _max_young_length;
|
||||
void adjust_lengths_based_on_dram_memory();
|
||||
|
||||
public:
|
||||
G1HeterogeneousHeapYoungGenSizer();
|
||||
|
||||
// Calculate the maximum length of the young gen given the number of regions
|
||||
// depending on the sizing algorithm.
|
||||
virtual void adjust_max_new_size(uint number_of_heap_regions);
|
||||
|
||||
virtual void heap_size_changed(uint new_number_of_heap_regions);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1HETEROGENEOUSHEAPYOUNGGENSIZER_HPP
|
@ -100,6 +100,12 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}

void G1PageBasedVirtualSpace::commit_and_set_special() {
commit_internal(addr_to_page_index(_low_boundary), addr_to_page_index(_high_boundary));
_special = true;
_dirty.initialize(reserved_size()/_page_size);
}

size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}

@ -136,6 +136,8 @@ class G1PageBasedVirtualSpace {
// Memory left to use/expand in this virtual space.
size_t uncommitted_size() const;

void commit_and_set_special();

bool contains(const void* p) const;

MemRegion reserved() {

@ -29,6 +29,7 @@
|
||||
#include "gc/g1/g1ConcurrentMark.hpp"
|
||||
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefine.hpp"
|
||||
#include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/g1IHOPControl.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
@ -46,7 +47,7 @@
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
|
||||
G1Policy::G1Policy(STWGCTimer* gc_timer) :
|
||||
G1Policy::G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
|
||||
_predictor(G1ConfidencePercent / 100.0),
|
||||
_analytics(new G1Analytics(&_predictor)),
|
||||
_remset_tracker(),
|
||||
@ -62,7 +63,7 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
|
||||
_survivor_surv_rate_group(new SurvRateGroup()),
|
||||
_reserve_factor((double) G1ReservePercent / 100.0),
|
||||
_reserve_regions(0),
|
||||
_young_gen_sizer(),
|
||||
_young_gen_sizer(G1YoungGenSizer::create_gen_sizer(policy)),
|
||||
_free_regions_at_end_of_collection(0),
|
||||
_max_rs_lengths(0),
|
||||
_rs_lengths_prediction(0),
|
||||
@ -83,6 +84,15 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
|
||||
|
||||
G1Policy::~G1Policy() {
|
||||
delete _ihop_control;
|
||||
delete _young_gen_sizer;
|
||||
}
|
||||
|
||||
G1Policy* G1Policy::create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw) {
|
||||
if (policy->is_hetero_heap()) {
|
||||
return new G1HeterogeneousHeapPolicy(policy, gc_timer_stw);
|
||||
} else {
|
||||
return new G1Policy(policy, gc_timer_stw);
|
||||
}
|
||||
}
|
||||
|
||||
G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
|
||||
@ -94,9 +104,9 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
|
||||
assert(Heap_lock->owned_by_self(), "Locking discipline.");
|
||||
|
||||
if (!adaptive_young_list_length()) {
|
||||
_young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
|
||||
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
|
||||
}
|
||||
_young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
|
||||
_young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
|
||||
|
||||
_free_regions_at_end_of_collection = _g1h->num_free_regions();
|
||||
|
||||
@ -176,7 +186,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {
|
||||
// smaller than 1.0) we'll get 1.
|
||||
_reserve_regions = (uint) ceil(reserve_regions_d);
|
||||
|
||||
_young_gen_sizer.heap_size_changed(new_number_of_regions);
|
||||
_young_gen_sizer->heap_size_changed(new_number_of_regions);
|
||||
|
||||
_ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
|
||||
}
|
||||
@ -195,14 +205,14 @@ uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) con
|
||||
}
|
||||
desired_min_length += base_min_length;
|
||||
// make sure we don't go below any user-defined minimum bound
|
||||
return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
|
||||
return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
|
||||
}
|
||||
|
||||
uint G1Policy::calculate_young_list_desired_max_length() const {
|
||||
// Here, we might want to also take into account any additional
|
||||
// constraints (i.e., user-defined minimum bound). Currently, we
|
||||
// effectively don't set this bound.
|
||||
return _young_gen_sizer.max_desired_young_length();
|
||||
return _young_gen_sizer->max_desired_young_length();
|
||||
}
|
||||
|
||||
uint G1Policy::update_young_list_max_and_target_length() {
|
||||
@ -218,6 +228,7 @@ uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
|
||||
uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
|
||||
YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
|
||||
_young_list_target_length = young_lengths.first;
|
||||
|
||||
return young_lengths.second;
|
||||
}
|
||||
|
||||
@ -900,7 +911,7 @@ bool G1Policy::can_expand_young_list() const {
|
||||
}
|
||||
|
||||
bool G1Policy::adaptive_young_list_length() const {
|
||||
return _young_gen_sizer.adaptive_young_list_length();
|
||||
return _young_gen_sizer->adaptive_young_list_length();
|
||||
}
|
||||
|
||||
size_t G1Policy::desired_survivor_size(uint max_regions) const {
|
||||
|
@ -25,6 +25,7 @@
|
||||
#ifndef SHARE_VM_GC_G1_G1POLICY_HPP
|
||||
#define SHARE_VM_GC_G1_G1POLICY_HPP
|
||||
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1InCSetState.hpp"
|
||||
@ -91,7 +92,7 @@ class G1Policy: public CHeapObj<mtGC> {
|
||||
// for the first time during initialization.
|
||||
uint _reserve_regions;
|
||||
|
||||
G1YoungGenSizer _young_gen_sizer;
|
||||
G1YoungGenSizer* _young_gen_sizer;
|
||||
|
||||
uint _free_regions_at_end_of_collection;
|
||||
|
||||
@ -282,10 +283,12 @@ private:
|
||||
void abort_time_to_mixed_tracking();
|
||||
public:
|
||||
|
||||
G1Policy(STWGCTimer* gc_timer);
|
||||
G1Policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
|
||||
|
||||
virtual ~G1Policy();
|
||||
|
||||
static G1Policy* create_policy(G1CollectorPolicy* policy, STWGCTimer* gc_timer_stw);
|
||||
|
||||
G1CollectorState* collector_state() const;
|
||||
|
||||
G1GCPhaseTimes* phase_times() const { return _phase_times; }
|
||||
@ -298,7 +301,7 @@ public:
|
||||
// This should be called after the heap is resized.
|
||||
void record_new_heap_size(uint new_number_of_regions);
|
||||
|
||||
void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
|
||||
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
|
||||
|
||||
void note_gc_start();
|
||||
|
||||
@ -308,11 +311,11 @@ public:
|
||||
|
||||
// Record the start and end of an evacuation pause.
|
||||
void record_collection_pause_start(double start_time_sec);
|
||||
void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
|
||||
virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
|
||||
|
||||
// Record the start and end of a full collection.
|
||||
void record_full_collection_start();
|
||||
void record_full_collection_end();
|
||||
virtual void record_full_collection_end();
|
||||
|
||||
// Must currently be called while the world is stopped.
|
||||
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
|
||||
@ -432,6 +435,10 @@ public:
|
||||
void update_max_gc_locker_expansion();
|
||||
|
||||
void update_survivors_policy();
|
||||
|
||||
virtual bool force_upgrade_to_full() {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1POLICY_HPP
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,15 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1BiasedArray.hpp"
|
||||
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
|
||||
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
|
||||
size_t used_size,
|
||||
@ -170,16 +174,156 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
|
||||
}
|
||||
}
|
||||
|
||||
static bool map_nvdimm_space(ReservedSpace rs) {
|
||||
assert(AllocateOldGenAt != NULL, "");
|
||||
int _backing_fd = os::create_file_for_heap(AllocateOldGenAt);
|
||||
if (_backing_fd == -1) {
|
||||
log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
|
||||
return false;
|
||||
}
|
||||
// commit this memory in nv-dimm
|
||||
char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), _backing_fd);
|
||||
|
||||
if (ret != rs.base()) {
|
||||
if (ret != NULL) {
|
||||
os::unmap_memory(rs.base(), rs.size());
|
||||
}
|
||||
log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
|
||||
os::close(_backing_fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
os::close(_backing_fd);
|
||||
return true;
|
||||
}
|
||||
|
||||
G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
|
||||
size_t actual_size,
|
||||
size_t page_size,
|
||||
size_t alloc_granularity,
|
||||
size_t commit_factor,
|
||||
MemoryType type) :
|
||||
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
|
||||
_rs(rs),
|
||||
_num_committed_dram(0),
|
||||
_num_committed_nvdimm(0),
|
||||
_page_size(page_size),
|
||||
_commit_factor(commit_factor),
|
||||
_type(type) {
|
||||
assert(actual_size == 2 * MaxHeapSize, "For 2-way heterogenuous heap, reserved space is two times MaxHeapSize");
|
||||
}
|
||||
|
||||
bool G1RegionToHeteroSpaceMapper::initialize() {
|
||||
// Since we need to re-map the reserved space - 'Xmx' to nv-dimm and 'Xmx' to dram, we need to release the reserved memory first.
|
||||
// Because on some OSes (e.g. Windows) you cannot do a file mapping on memory reserved with regular mapping.
|
||||
os::release_memory(_rs.base(), _rs.size());
|
||||
// First half of size Xmx is for nv-dimm.
|
||||
ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
|
||||
assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");
|
||||
|
||||
// Second half of reserved memory is mapped to dram.
|
||||
ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);
|
||||
|
||||
assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
|
||||
|
||||
// Reserve dram memory
|
||||
char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
|
||||
if (base != rs_dram.base()) {
|
||||
if (base != NULL) {
|
||||
os::release_memory(base, rs_dram.size());
|
||||
}
|
||||
log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogenous memory initialization");
|
||||
return false;
|
||||
}
|
||||
|
||||
// We reserve and commit this entire space to NV-DIMM.
|
||||
if (!map_nvdimm_space(rs_nvdimm)) {
|
||||
log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogenous memory initialization");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_region_granularity >= (_page_size * _commit_factor)) {
|
||||
_dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
|
||||
} else {
|
||||
_dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
|
||||
}
|
||||
|
||||
_start_index_of_nvdimm = 0;
|
||||
_start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
|
||||
return true;
|
||||
}
|
||||
|
||||
void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
|
||||
uint end_idx = (start_idx + (uint)num_regions - 1);
|
||||
|
||||
uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
|
||||
uint num_nvdimm = (uint)num_regions - num_dram;
|
||||
|
||||
if (num_nvdimm > 0) {
|
||||
// We do not need to commit nv-dimm regions, since they are committed in the beginning.
|
||||
_num_committed_nvdimm += num_nvdimm;
|
||||
}
|
||||
if (num_dram > 0) {
|
||||
_dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
|
||||
_num_committed_dram += num_dram;
|
||||
}
|
||||
}
|
||||
|
||||
void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
|
||||
uint end_idx = (start_idx + (uint)num_regions - 1);
|
||||
uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
|
||||
uint num_nvdimm = (uint)num_regions - num_dram;
|
||||
|
||||
if (num_nvdimm > 0) {
|
||||
// We do not uncommit memory for nv-dimm regions.
|
||||
_num_committed_nvdimm -= num_nvdimm;
|
||||
}
|
||||
|
||||
if (num_dram > 0) {
|
||||
_dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
|
||||
_num_committed_dram -= num_dram;
|
||||
}
|
||||
}
|
||||
|
||||
uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
|
||||
return _num_committed_dram;
|
||||
}
|
||||
|
||||
uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
|
||||
return _num_committed_nvdimm;
|
||||
}
|
||||
|
||||
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
|
||||
size_t actual_size,
|
||||
size_t page_size,
|
||||
size_t region_granularity,
|
||||
size_t commit_factor,
|
||||
MemoryType type) {
|
||||
if (AllocateOldGenAt != NULL) {
|
||||
G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
|
||||
if (!mapper->initialize()) {
|
||||
delete mapper;
|
||||
return NULL;
|
||||
}
|
||||
return (G1RegionToSpaceMapper*)mapper;
|
||||
} else {
|
||||
return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
|
||||
}
|
||||
}
|
||||
|
||||
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
|
||||
size_t actual_size,
|
||||
size_t page_size,
|
||||
size_t region_granularity,
|
||||
size_t commit_factor,
|
||||
MemoryType type) {
|
||||
|
||||
if (region_granularity >= (page_size * commit_factor)) {
|
||||
return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
|
||||
} else {
|
||||
return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
|
||||
}
|
||||
}
|
||||
|
||||
void G1RegionToSpaceMapper::commit_and_set_special() {
|
||||
_storage.commit_and_set_special();
|
||||
}
|
||||
|
@ -70,6 +70,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
|
||||
return _commit_map.at(idx);
|
||||
}
|
||||
|
||||
void commit_and_set_special();
|
||||
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
|
||||
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
|
||||
|
||||
@ -87,6 +88,37 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
|
||||
size_t region_granularity,
|
||||
size_t byte_translation_factor,
|
||||
MemoryType type);
|
||||
|
||||
static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
|
||||
size_t actual_size,
|
||||
size_t page_size,
|
||||
size_t region_granularity,
|
||||
size_t byte_translation_factor,
|
||||
MemoryType type);
|
||||
};
|
||||
|
||||
// G1RegionToSpaceMapper implementation where
|
||||
// part of space is mapped to dram and part to nv-dimm
|
||||
class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
|
||||
private:
|
||||
size_t _pages_per_region;
|
||||
ReservedSpace _rs;
|
||||
G1RegionToSpaceMapper* _dram_mapper;
|
||||
uint _num_committed_dram;
|
||||
uint _num_committed_nvdimm;
|
||||
uint _start_index_of_nvdimm;
|
||||
uint _start_index_of_dram;
|
||||
size_t _page_size;
|
||||
size_t _commit_factor;
|
||||
MemoryType _type;
|
||||
|
||||
public:
|
||||
G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
|
||||
bool initialize();
|
||||
uint num_committed_dram() const;
|
||||
uint num_committed_nvdimm() const;
|
||||
|
||||
virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
|
||||
virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
|
||||
};
|
||||
#endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP
|
||||
|
@ -138,8 +138,8 @@ void VM_G1CollectForAllocation::doit() {
// kind of GC.
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
} else {
bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
!g1h->has_regions_left_for_allocation();
bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);

if (should_upgrade_to_full) {
// There has been a request to perform a GC to free some space. We have no
// information on how much memory has been asked for. In case there are

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +23,14 @@
*/

#include "precompiled.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
_min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
_adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {

if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
@ -127,3 +129,11 @@ void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
&_max_desired_young_length);
}

G1YoungGenSizer* G1YoungGenSizer::create_gen_sizer(G1CollectorPolicy* policy) {
if (policy->is_hetero_heap()) {
return new G1HeterogeneousHeapYoungGenSizer();
} else {
return new G1YoungGenSizer();
}
}

@ -25,6 +25,7 @@
|
||||
#ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
|
||||
#define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
|
||||
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// There are three command line options related to the young gen size:
|
||||
@ -63,7 +64,7 @@
|
||||
//
|
||||
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
|
||||
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
|
||||
class G1YoungGenSizer {
|
||||
class G1YoungGenSizer : public CHeapObj<mtGC> {
|
||||
private:
|
||||
enum SizerKind {
|
||||
SizerDefaults,
|
||||
@ -73,8 +74,6 @@ private:
|
||||
SizerNewRatio
|
||||
};
|
||||
SizerKind _sizer_kind;
|
||||
uint _min_desired_young_length;
|
||||
uint _max_desired_young_length;
|
||||
|
||||
// False when using a fixed young generation size due to command-line options,
|
||||
// true otherwise.
|
||||
@ -87,13 +86,17 @@ private:
|
||||
// given the number of heap regions depending on the kind of sizing algorithm.
|
||||
void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
|
||||
|
||||
protected:
|
||||
uint _min_desired_young_length;
|
||||
uint _max_desired_young_length;
|
||||
|
||||
public:
|
||||
G1YoungGenSizer();
|
||||
// Calculate the maximum length of the young gen given the number of regions
|
||||
// depending on the sizing algorithm.
|
||||
void adjust_max_new_size(uint number_of_heap_regions);
|
||||
virtual void adjust_max_new_size(uint number_of_heap_regions);
|
||||
|
||||
void heap_size_changed(uint new_number_of_heap_regions);
|
||||
virtual void heap_size_changed(uint new_number_of_heap_regions);
|
||||
uint min_desired_young_length() const {
|
||||
return _min_desired_young_length;
|
||||
}
|
||||
@ -104,6 +107,8 @@ public:
|
||||
bool adaptive_young_list_length() const {
|
||||
return _adaptive_size;
|
||||
}
|
||||
|
||||
static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
|
||||
|
@ -71,9 +71,9 @@ bool G1YoungRemSetSamplingThread::should_start_periodic_gc() {

// Check if load is lower than max.
double recent_load;
if ((G1PeriodicGCSystemLoadThreshold > 0) &&
if ((G1PeriodicGCSystemLoadThreshold > 0.0f) &&
(os::loadavg(&recent_load, 1) == -1 || recent_load > G1PeriodicGCSystemLoadThreshold)) {
log_debug(gc, periodic)("Load %1.2f is higher than threshold " UINTX_FORMAT ". Skipping.",
log_debug(gc, periodic)("Load %1.2f is higher than threshold %1.2f. Skipping.",
recent_load, G1PeriodicGCSystemLoadThreshold);
return false;
}

@ -311,10 +311,21 @@
"perform a concurrent GC as periodic GC, otherwise use a STW " \
"Full GC.") \
\
manageable(uintx, G1PeriodicGCSystemLoadThreshold, 0, \
"Maximum recent system wide system load as returned by the 1m " \
"value of getloadavg() at which G1 triggers a periodic GC. A " \
"load above this value cancels a given periodic GC. A value of " \
"zero disables this check.") \
manageable(double, G1PeriodicGCSystemLoadThreshold, 0.0, \
"Maximum recent system wide load as returned by the 1m value " \
"of getloadavg() at which G1 triggers a periodic GC. A load " \
"above this value cancels a given periodic GC. A value of zero " \
"disables this check.") \
range(0.0, (double)max_uintx) \
\
experimental(uintx, G1YoungExpansionBufferPercent, 10, \
"When heterogenous heap is enabled by AllocateOldGenAt " \
"option, after every GC, young gen is re-sized which " \
"involves system calls to commit/uncommit memory. To " \
"reduce these calls, we keep a buffer of extra regions to " \
"absorb small changes in young gen length. This flag takes " \
"the buffer size as an percentage of young gen length") \
range(0, 100) \


#endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP

@ -28,6 +28,8 @@
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "gc/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc/g1/heapRegionSet.inline.hpp"
|
||||
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
|
||||
#include "gc/shared/collectorPolicy.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
@ -54,18 +56,25 @@ public:
|
||||
};
|
||||
|
||||
HeapRegionManager::HeapRegionManager() :
|
||||
_regions(), _heap_mapper(NULL),
|
||||
_prev_bitmap_mapper(NULL),
|
||||
_next_bitmap_mapper(NULL),
|
||||
_bot_mapper(NULL),
|
||||
_cardtable_mapper(NULL),
|
||||
_card_counts_mapper(NULL),
|
||||
_free_list("Free list", new MasterFreeRegionListChecker()),
|
||||
_available_map(mtGC),
|
||||
_num_committed(0),
|
||||
_allocated_heapregions_length(0)
|
||||
_allocated_heapregions_length(0),
|
||||
_regions(), _heap_mapper(NULL),
|
||||
_prev_bitmap_mapper(NULL),
|
||||
_next_bitmap_mapper(NULL),
|
||||
_free_list("Free list", new MasterFreeRegionListChecker())
|
||||
{ }
|
||||
|
||||
HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy) {
|
||||
if (policy->is_hetero_heap()) {
|
||||
return new HeterogeneousHeapRegionManager((uint)(policy->max_heap_byte_size() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
|
||||
}
|
||||
return new HeapRegionManager();
|
||||
}
|
||||
|
||||
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
|
||||
G1RegionToSpaceMapper* prev_bitmap,
|
||||
G1RegionToSpaceMapper* next_bitmap,
|
||||
@ -514,7 +523,7 @@ void HeapRegionManager::verify_optional() {
|
||||
#endif // PRODUCT
|
||||
|
||||
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
|
||||
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
|
||||
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
|
||||
assert(n_workers > 0, "Need at least one worker.");
|
||||
uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
|
||||
memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
|
||||
|
@ -26,8 +26,10 @@
|
||||
#define SHARE_VM_GC_G1_HEAPREGIONMANAGER_HPP
|
||||
|
||||
#include "gc/g1/g1BiasedArray.hpp"
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "gc/shared/collectorPolicy.hpp"
|
||||
#include "services/memoryUsage.hpp"
|
||||
|
||||
class HeapRegion;
|
||||
@ -71,17 +73,10 @@ class HeapRegionManager: public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
friend class HeapRegionClaimer;
|
||||
|
||||
G1HeapRegionTable _regions;
|
||||
|
||||
G1RegionToSpaceMapper* _heap_mapper;
|
||||
G1RegionToSpaceMapper* _prev_bitmap_mapper;
|
||||
G1RegionToSpaceMapper* _next_bitmap_mapper;
|
||||
G1RegionToSpaceMapper* _bot_mapper;
|
||||
G1RegionToSpaceMapper* _cardtable_mapper;
|
||||
G1RegionToSpaceMapper* _card_counts_mapper;
|
||||
|
||||
FreeRegionList _free_list;
|
||||
|
||||
// Each bit in this bitmap indicates that the corresponding region is available
|
||||
// for allocation.
|
||||
CHeapBitMap _available_map;
|
||||
@ -95,11 +90,8 @@ class HeapRegionManager: public CHeapObj<mtGC> {
|
||||
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
|
||||
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
|
||||
|
||||
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
|
||||
|
||||
// Pass down commit calls to the VirtualSpace.
|
||||
void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
|
||||
void uncommit_regions(uint index, size_t num_regions = 1);
|
||||
|
||||
// Notify other data structures about change in the heap layout.
|
||||
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
|
||||
@ -117,6 +109,16 @@ class HeapRegionManager: public CHeapObj<mtGC> {
|
||||
// the heap. Returns the length of the sequence found. If this value is zero, no
|
||||
// sequence could be found, otherwise res_idx contains the start index of this range.
|
||||
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
|
||||
|
||||
protected:
|
||||
G1HeapRegionTable _regions;
|
||||
G1RegionToSpaceMapper* _heap_mapper;
|
||||
G1RegionToSpaceMapper* _prev_bitmap_mapper;
|
||||
G1RegionToSpaceMapper* _next_bitmap_mapper;
|
||||
FreeRegionList _free_list;
|
||||
|
||||
void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
|
||||
void uncommit_regions(uint index, size_t num_regions = 1);
|
||||
// Allocate a new HeapRegion for the given index.
|
||||
HeapRegion* new_heap_region(uint hrm_index);
|
||||
#ifdef ASSERT
|
||||
@ -127,18 +129,25 @@ public:
|
||||
// Empty constructor, we'll initialize it with the initialize() method.
|
||||
HeapRegionManager();
|
||||
|
||||
void initialize(G1RegionToSpaceMapper* heap_storage,
|
||||
G1RegionToSpaceMapper* prev_bitmap,
|
||||
G1RegionToSpaceMapper* next_bitmap,
|
||||
G1RegionToSpaceMapper* bot,
|
||||
G1RegionToSpaceMapper* cardtable,
|
||||
G1RegionToSpaceMapper* card_counts);
|
||||
static HeapRegionManager* create_manager(G1CollectedHeap* heap, G1CollectorPolicy* policy);
|
||||
|
||||
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
|
||||
G1RegionToSpaceMapper* prev_bitmap,
|
||||
G1RegionToSpaceMapper* next_bitmap,
|
||||
G1RegionToSpaceMapper* bot,
|
||||
G1RegionToSpaceMapper* cardtable,
|
||||
G1RegionToSpaceMapper* card_counts);
|
||||
|
||||
// Prepare heap regions before and after full collection.
|
||||
// Nothing to be done in this class.
|
||||
virtual void prepare_for_full_collection_start() {}
|
||||
virtual void prepare_for_full_collection_end() {}
|
||||
|
||||
// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
|
||||
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
|
||||
// the heap from the lowest address, this region (and its associated data
|
||||
// structures) are available and we do not need to check further.
|
||||
HeapRegion* get_dummy_region() { return new_heap_region(0); }
|
||||
virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
|
||||
|
||||
// Return the HeapRegion at the given index. Assume that the index
|
||||
// is valid.
|
||||
@ -167,8 +176,8 @@ public:
|
||||
_free_list.add_ordered(list);
|
||||
}
|
||||
|
||||
HeapRegion* allocate_free_region(bool is_old) {
|
||||
HeapRegion* hr = _free_list.remove_region(is_old);
|
||||
virtual HeapRegion* allocate_free_region(HeapRegionType type) {
|
||||
HeapRegion* hr = _free_list.remove_region(!type.is_young());
|
||||
|
||||
if (hr != NULL) {
|
||||
assert(hr->next() == NULL, "Single region should not have next");
|
||||
@ -202,6 +211,9 @@ public:
|
||||
// Return the maximum number of regions in the heap.
|
||||
uint max_length() const { return (uint)_regions.length(); }
|
||||
|
||||
// Return maximum number of regions that heap can expand to.
|
||||
virtual uint max_expandable_length() const { return (uint)_regions.length(); }
|
||||
|
||||
MemoryUsage get_auxiliary_data_memory_usage() const;
|
||||
|
||||
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
|
||||
@ -210,26 +222,26 @@ public:
|
||||
// HeapRegions, or re-use existing ones. Returns the number of regions the
|
||||
// sequence was expanded by. If a HeapRegion allocation fails, the resulting
|
||||
// number of regions might be smaller than what's desired.
|
||||
uint expand_by(uint num_regions, WorkGang* pretouch_workers);
|
||||
virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Makes sure that the regions from start to start+num_regions-1 are available
|
||||
// for allocation. Returns the number of regions that were committed to achieve
|
||||
// this.
|
||||
uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
|
||||
virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Find a contiguous set of empty regions of length num. Returns the start index of
|
||||
// that set, or G1_NO_HRM_INDEX.
|
||||
uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
|
||||
virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
|
||||
// Find a contiguous set of empty or unavailable regions of length num. Returns the
|
||||
// start index of that set, or G1_NO_HRM_INDEX.
|
||||
uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
|
||||
virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
|
||||
|
||||
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
|
||||
|
||||
// Find the highest free or uncommitted region in the reserved heap,
|
||||
// and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
|
||||
// Set the 'expanded' boolean true if a new region was committed.
|
||||
uint find_highest_free(bool* expanded);
|
||||
virtual uint find_highest_free(bool* expanded);
|
||||
|
||||
// Allocate the regions that contain the address range specified, committing the
|
||||
// regions if necessary. Return false if any of the regions is already committed
|
||||
@ -244,13 +256,13 @@ public:
|
||||
|
||||
// Uncommit up to num_regions_to_remove regions that are completely free.
|
||||
// Return the actual number of uncommitted regions.
|
||||
uint shrink_by(uint num_regions_to_remove);
|
||||
virtual uint shrink_by(uint num_regions_to_remove);
|
||||
|
||||
// Uncommit a number of regions starting at the specified index, which must be available,
|
||||
// empty, and free.
|
||||
void shrink_at(uint index, size_t num_regions);
|
||||
|
||||
void verify();
|
||||
virtual void verify();
|
||||
|
||||
// Do some sanity checking.
|
||||
void verify_optional() PRODUCT_RETURN;
|
||||
|
@ -234,6 +234,21 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
verify_optional();
}

uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
HeapRegion* cur = _head;
uint num = 0;
while (cur != NULL) {
uint index = cur->hrm_index();
if (index > end) {
break;
} else if (index >= start) {
num++;
}
cur = cur->next();
}
return num;
}

void FreeRegionList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.

@ -194,6 +194,8 @@ public:
void remove_starting_at(HeapRegion* first, uint num_regions);

virtual void verify();

uint num_of_regions_in_range(uint start, uint end) const;
};

// Iterator class that provides a convenient way to iterate over the

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,11 @@
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/heapRegionType.hpp"

const HeapRegionType HeapRegionType::Eden = HeapRegionType(EdenTag);
const HeapRegionType HeapRegionType::Survivor = HeapRegionType(SurvTag);
const HeapRegionType HeapRegionType::Old = HeapRegionType(OldTag);
const HeapRegionType HeapRegionType::Humongous = HeapRegionType(StartsHumongousTag);

bool HeapRegionType::is_valid(Tag tag) {
switch (tag) {
case FreeTag:

@ -117,6 +117,9 @@
_tag = tag;
}

// Private constructor used static constants
HeapRegionType(Tag t) : _tag(t) { hrt_assert_is_valid(_tag); }

public:
// Queries

@ -186,6 +189,11 @@ public:
G1HeapRegionTraceType::Type get_trace_type();

HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }

static const HeapRegionType Eden;
static const HeapRegionType Survivor;
static const HeapRegionType Old;
static const HeapRegionType Humongous;
};

#endif // SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP

523
src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp
Normal file
@ -0,0 +1,523 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1ConcurrentRefine.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "gc/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc/g1/heapRegionSet.inline.hpp"
|
||||
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
|
||||
HeterogeneousHeapRegionManager* HeterogeneousHeapRegionManager::manager() {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
assert(g1h != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
|
||||
|
||||
HeapRegionManager* hrm = g1h->hrm();
|
||||
assert(hrm != NULL, "Uninitialized access to HeterogeneousHeapRegionManager::manager()");
|
||||
return (HeterogeneousHeapRegionManager*)hrm;
|
||||
}
|
||||
|
||||
void HeterogeneousHeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
|
||||
G1RegionToSpaceMapper* prev_bitmap,
|
||||
G1RegionToSpaceMapper* next_bitmap,
|
||||
G1RegionToSpaceMapper* bot,
|
||||
G1RegionToSpaceMapper* cardtable,
|
||||
G1RegionToSpaceMapper* card_counts) {
|
||||
HeapRegionManager::initialize(heap_storage, prev_bitmap, next_bitmap, bot, cardtable, card_counts);
|
||||
|
||||
// We commit bitmap for all regions during initialization and mark the bitmap space as special.
|
||||
// This allows regions to be un-committed while concurrent-marking threads are accessing the bitmap concurrently.
|
||||
_prev_bitmap_mapper->commit_and_set_special();
|
||||
_next_bitmap_mapper->commit_and_set_special();
|
||||
}
|
||||
|
||||
// expand_by() is called to grow the heap. We grow into nvdimm now.
|
||||
// Dram regions are committed later as needed during mutator region allocation or
|
||||
// when young list target length is determined after gc cycle.
|
||||
uint HeterogeneousHeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
|
||||
uint num_regions_possible = total_regions_committed() >= max_expandable_length() ? 0 : max_expandable_length() - total_regions_committed();
|
||||
uint num_expanded = expand_nvdimm(MIN2(num_regions, num_regions_possible), pretouch_workers);
|
||||
return num_expanded;
|
||||
}
|
||||
|
||||
// Expands heap starting from 'start' index. The question is should we expand from one memory (e.g. nvdimm) to another (e.g. dram).
|
||||
// Looking at the code, expand_at() is called for humongous allocation where 'start' is in nv-dimm.
|
||||
// So we only allocate regions in the same kind of memory as 'start'.
|
||||
uint HeterogeneousHeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
|
||||
if (num_regions == 0) {
|
||||
return 0;
|
||||
}
|
||||
uint target_num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed());
|
||||
uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram();
|
||||
|
||||
uint num_expanded = expand_in_range(start, end, target_num_regions, pretouch_workers);
|
||||
assert(total_regions_committed() <= max_expandable_length(), "must be");
|
||||
return num_expanded;
|
||||
}
|
||||
|
||||
// This function ensures that there are 'expected_num_regions' committed regions in dram.
|
||||
// If new regions are committed, it un-commits that many regions from nv-dimm.
|
||||
// If there are already more regions committed in dram, extra regions are un-committed.
|
||||
void HeterogeneousHeapRegionManager::adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) {
|
||||
|
||||
// Release back the extra regions allocated in evacuation failure scenario.
|
||||
if(_no_borrowed_regions > 0) {
|
||||
_no_borrowed_regions -= shrink_dram(_no_borrowed_regions);
|
||||
_no_borrowed_regions -= shrink_nvdimm(_no_borrowed_regions);
|
||||
}
|
||||
|
||||
if(expected_num_regions > free_list_dram_length()) {
|
||||
// If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing.
|
||||
uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPercent / 100);
|
||||
uint to_be_made_available = targeted_dram_regions - free_list_dram_length();
|
||||
|
||||
#ifdef ASSERT
|
||||
uint total_committed_before = total_regions_committed();
|
||||
#endif
|
||||
uint can_be_made_available = shrink_nvdimm(to_be_made_available);
|
||||
uint ret = expand_dram(can_be_made_available, pretouch_workers);
|
||||
#ifdef ASSERT
|
||||
assert(ret == can_be_made_available, "should be equal");
|
||||
assert(total_committed_before == total_regions_committed(), "invariant not met");
|
||||
#endif
|
||||
} else {
|
||||
uint to_be_released = free_list_dram_length() - expected_num_regions;
|
||||
// if number of extra DRAM regions is small, do not shrink.
|
||||
if (to_be_released < expected_num_regions * G1YoungExpansionBufferPercent / 100) {
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
uint total_committed_before = total_regions_committed();
|
||||
#endif
|
||||
uint ret = shrink_dram(to_be_released);
|
||||
assert(ret == to_be_released, "Should be able to shrink by given amount");
|
||||
ret = expand_nvdimm(to_be_released, pretouch_workers);
|
||||
#ifdef ASSERT
|
||||
assert(ret == to_be_released, "Should be able to expand by given amount");
|
||||
assert(total_committed_before == total_regions_committed(), "invariant not met");
|
||||
#endif
|
||||
}
|
||||
}
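Illustrative sketch (not from the patch): the buffer arithmetic adjust_dram_regions() applies, worked through with hypothetical numbers; 'buffer_percent' stands in for G1YoungExpansionBufferPercent.

#include <cstdio>

// Expand a little beyond the request to absorb small variations in young gen sizing.
static unsigned targeted_regions(unsigned expected, unsigned buffer_percent) {
  return (unsigned)(expected * (1 + (double)buffer_percent / 100));
}

int main() {
  unsigned expected = 100, buffer_percent = 10;            // hypothetical inputs
  unsigned target = targeted_regions(expected, buffer_percent);      // 110 regions
  unsigned free_dram = 130;
  unsigned to_be_released = free_dram - expected;                    // 30 extra regions
  // If the surplus is below the buffer, the shrink is skipped; here 30 >= 10, so it proceeds.
  bool skip_shrink = to_be_released < expected * buffer_percent / 100;
  std::printf("target=%u release=%u skip=%d\n", target, to_be_released, skip_shrink);
  return 0;
}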
|
||||
|
||||
uint HeterogeneousHeapRegionManager::total_regions_committed() const {
|
||||
return num_committed_dram() + num_committed_nvdimm();
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::num_committed_dram() const {
|
||||
// This class does not keep count of committed regions in dram and nv-dimm.
|
||||
// G1RegionToHeteroSpaceMapper keeps this information.
|
||||
return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_dram();
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::num_committed_nvdimm() const {
|
||||
// See comment for num_committed_dram()
|
||||
return static_cast<G1RegionToHeteroSpaceMapper*>(_heap_mapper)->num_committed_nvdimm();
|
||||
}
|
||||
|
||||
// Return maximum number of regions that heap can expand to.
|
||||
uint HeterogeneousHeapRegionManager::max_expandable_length() const {
|
||||
return _max_regions;
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const {
|
||||
guarantee(res_idx != NULL, "checking");
|
||||
guarantee(start_idx <= (max_length() + 1), "checking");
|
||||
|
||||
uint num_regions = 0;
|
||||
|
||||
uint cur = start_idx;
|
||||
while (cur <= end_idx && is_available(cur)) {
|
||||
cur++;
|
||||
}
|
||||
if (cur == end_idx + 1) {
|
||||
return num_regions;
|
||||
}
|
||||
*res_idx = cur;
|
||||
while (cur <= end_idx && !is_available(cur)) {
|
||||
cur++;
|
||||
}
|
||||
num_regions = cur - *res_idx;
|
||||
|
||||
#ifdef ASSERT
|
||||
for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
|
||||
assert(!is_available(i), "just checking");
|
||||
}
|
||||
assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur),
|
||||
"The region at the current position %u must be available or at the end", cur);
|
||||
#endif
|
||||
return num_regions;
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::expand_dram(uint num_regions, WorkGang* pretouch_workers) {
|
||||
return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers);
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) {
|
||||
return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers);
|
||||
}
|
||||
|
||||
// Follows the same logic as expand_at() from HeapRegionManager.
|
||||
uint HeterogeneousHeapRegionManager::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) {
|
||||
|
||||
uint so_far = 0;
|
||||
uint chunk_start = 0;
|
||||
uint num_last_found = 0;
|
||||
while (so_far < num_regions &&
|
||||
(num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) {
|
||||
uint to_commit = MIN2(num_regions - so_far, num_last_found);
|
||||
make_regions_available(chunk_start, to_commit, pretouch_gang);
|
||||
so_far += to_commit;
|
||||
start = chunk_start + to_commit + 1;
|
||||
}
|
||||
|
||||
return so_far;
|
||||
}
|
||||
|
||||
// Shrink in the range of indexes which are reserved for dram.
|
||||
uint HeterogeneousHeapRegionManager::shrink_dram(uint num_regions, bool update_free_list) {
|
||||
return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list);
|
||||
}
|
||||
|
||||
// Shrink in the range of indexes which are reserved for nv-dimm.
|
||||
uint HeterogeneousHeapRegionManager::shrink_nvdimm(uint num_regions, bool update_free_list) {
|
||||
return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list);
|
||||
}
|
||||
|
||||
// Find empty regions in given range, un-commit them and return the count.
|
||||
uint HeterogeneousHeapRegionManager::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) {
|
||||
|
||||
if (num_regions == 0) {
|
||||
return 0;
|
||||
}
|
||||
uint so_far = 0;
|
||||
uint idx_last_found = 0;
|
||||
uint num_last_found;
|
||||
while (so_far < num_regions &&
|
||||
(num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) {
|
||||
uint to_uncommit = MIN2(num_regions - so_far, num_last_found);
|
||||
if(update_free_list) {
|
||||
_free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit);
|
||||
}
|
||||
uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
|
||||
so_far += to_uncommit;
|
||||
end = idx_last_found;
|
||||
}
|
||||
return so_far;
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) {
|
||||
guarantee(res_idx != NULL, "checking");
|
||||
guarantee(start_idx < max_length(), "checking");
|
||||
guarantee(end_idx < max_length(), "checking");
|
||||
if(start_idx > end_idx) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint num_regions_found = 0;
|
||||
|
||||
jlong cur = end_idx;
|
||||
while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) {
|
||||
cur--;
|
||||
}
|
||||
if (cur == start_idx - 1) {
|
||||
return num_regions_found;
|
||||
}
|
||||
jlong old_cur = cur;
|
||||
// cur indexes the first empty region
|
||||
while (cur >= start_idx && is_available(cur) && at(cur)->is_empty()) {
|
||||
cur--;
|
||||
}
|
||||
*res_idx = cur + 1;
|
||||
num_regions_found = old_cur - cur;
|
||||
|
||||
#ifdef ASSERT
|
||||
for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
|
||||
assert(at(i)->is_empty(), "just checking");
|
||||
}
|
||||
#endif
|
||||
return num_regions_found;
|
||||
}
|
||||
|
||||
HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
|
||||
|
||||
// We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
|
||||
// will force a full collection to remedy the situation.
|
||||
// Free region requests from GC threads can proceed.
|
||||
if(type.is_eden() || type.is_humongous()) {
|
||||
if(has_borrowed_regions()) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// old and humongous regions are allocated from nv-dimm; eden and survivor regions are allocated from dram
|
||||
// assumption: dram regions take higher indexes
|
||||
bool from_nvdimm = (type.is_old() || type.is_humongous()) ? true : false;
|
||||
bool from_head = from_nvdimm;
|
||||
HeapRegion* hr = _free_list.remove_region(from_head);
|
||||
|
||||
if (hr != NULL && ( (from_nvdimm && !is_in_nvdimm(hr->hrm_index())) || (!from_nvdimm && !is_in_dram(hr->hrm_index())) ) ) {
|
||||
_free_list.add_ordered(hr);
|
||||
hr = NULL;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
uint total_committed_before = total_regions_committed();
|
||||
#endif
|
||||
|
||||
if (hr == NULL) {
|
||||
if (!from_nvdimm) {
|
||||
uint ret = shrink_nvdimm(1);
|
||||
if (ret == 1) {
|
||||
ret = expand_dram(1, NULL);
|
||||
assert(ret == 1, "We should be able to commit one region");
|
||||
hr = _free_list.remove_region(from_head);
|
||||
}
|
||||
}
|
||||
else { /*is_old*/
|
||||
uint ret = shrink_dram(1);
|
||||
if (ret == 1) {
|
||||
ret = expand_nvdimm(1, NULL);
|
||||
assert(ret == 1, "We should be able to commit one region");
|
||||
hr = _free_list.remove_region(from_head);
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef ASSERT
|
||||
assert(total_committed_before == total_regions_committed(), "invariant not met");
|
||||
#endif
|
||||
|
||||
// When an old region is requested (which happens during collection pause) and we can't find any empty region
|
||||
// in the set of available regions (which is an evacuation failure scenario), we borrow (or pre-allocate) an unavailable region
|
||||
// from nv-dimm. This region is used to evacuate surviving objects from eden, survivor or old.
|
||||
if(hr == NULL && type.is_old()) {
|
||||
hr = borrow_old_region_for_gc();
|
||||
}
|
||||
|
||||
if (hr != NULL) {
|
||||
assert(hr->next() == NULL, "Single region should not have next");
|
||||
assert(is_available(hr->hrm_index()), "Must be committed");
|
||||
}
|
||||
return hr;
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_contiguous_only_empty(size_t num) {
|
||||
if (has_borrowed_regions()) {
|
||||
return G1_NO_HRM_INDEX;
|
||||
}
|
||||
return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_contiguous_empty_or_unavailable(size_t num) {
|
||||
if (has_borrowed_regions()) {
|
||||
return G1_NO_HRM_INDEX;
|
||||
}
|
||||
return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
|
||||
uint found = 0;
|
||||
size_t length_found = 0;
|
||||
uint cur = (uint)start;
|
||||
uint length_unavailable = 0;
|
||||
|
||||
while (length_found < num && cur <= end) {
|
||||
HeapRegion* hr = _regions.get_by_index(cur);
|
||||
if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
|
||||
// This region is a potential candidate for allocation into.
|
||||
if (!is_available(cur)) {
|
||||
if(shrink_dram(1) == 1) {
|
||||
uint ret = expand_in_range(cur, cur, 1, NULL);
|
||||
assert(ret == 1, "We should be able to expand at this index");
|
||||
} else {
|
||||
length_unavailable++;
|
||||
}
|
||||
}
|
||||
length_found++;
|
||||
}
|
||||
else {
|
||||
// This region is not a candidate. The next region is the next possible one.
|
||||
found = cur + 1;
|
||||
length_found = 0;
|
||||
}
|
||||
cur++;
|
||||
}
|
||||
|
||||
if (length_found == num) {
|
||||
for (uint i = found; i < (found + num); i++) {
|
||||
HeapRegion* hr = _regions.get_by_index(i);
|
||||
// sanity check
|
||||
guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
|
||||
"Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
|
||||
" that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
|
||||
}
|
||||
if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
|
||||
// If 'length_unavailable' regions were to be made available, we would exceed the maximum number of regions.
|
||||
return G1_NO_HRM_INDEX;
|
||||
}
|
||||
return found;
|
||||
}
|
||||
else {
|
||||
return G1_NO_HRM_INDEX;
|
||||
}
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::find_highest_free(bool* expanded) {
|
||||
// Loop downwards from the highest dram region index, looking for an
|
||||
// entry which is either free or not yet committed. If not yet
|
||||
// committed, expand_at that index.
|
||||
uint curr = end_index_of_dram();
|
||||
while (true) {
|
||||
HeapRegion *hr = _regions.get_by_index(curr);
|
||||
if (hr == NULL && !(total_regions_committed() < _max_regions)) {
|
||||
uint res = shrink_nvdimm(1);
|
||||
if (res == 1) {
|
||||
res = expand_in_range(curr, curr, 1, NULL);
|
||||
assert(res == 1, "We should be able to expand since shrink was successful");
|
||||
*expanded = true;
|
||||
return curr;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (hr->is_free()) {
|
||||
*expanded = false;
|
||||
return curr;
|
||||
}
|
||||
}
|
||||
if (curr == start_index_of_dram()) {
|
||||
return G1_NO_HRM_INDEX;
|
||||
}
|
||||
curr--;
|
||||
}
|
||||
}
|
||||
|
||||
// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
|
||||
// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
|
||||
// could be just one region. This region is committed in dram to be used for the young generation, leaving region 0 (which is in nvdimm)
|
||||
// unavailable.
|
||||
HeapRegion* HeterogeneousHeapRegionManager::get_dummy_region() {
|
||||
uint curr = 0;
|
||||
|
||||
while (curr < _regions.length()) {
|
||||
if (is_available(curr)) {
|
||||
return new_heap_region(curr);
|
||||
}
|
||||
curr++;
|
||||
}
|
||||
assert(false, "We should always find a region available for dummy region");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// First shrink in dram, then in nv-dimm.
|
||||
uint HeterogeneousHeapRegionManager::shrink_by(uint num_regions) {
|
||||
// This call is made at the end of a full collection. Before making this call the region sets are torn down (tear_down_region_sets()).
|
||||
// So the shrink() calls below do not need to remove uncommitted regions from the free list.
|
||||
uint ret = shrink_dram(num_regions, false /* update_free_list */);
|
||||
ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void HeterogeneousHeapRegionManager::verify() {
|
||||
HeapRegionManager::verify();
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::free_list_dram_length() const {
|
||||
return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::free_list_nvdimm_length() const {
|
||||
return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
|
||||
}
|
||||
|
||||
bool HeterogeneousHeapRegionManager::is_in_nvdimm(uint index) const {
|
||||
return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
|
||||
}
|
||||
|
||||
bool HeterogeneousHeapRegionManager::is_in_dram(uint index) const {
|
||||
return index >= start_index_of_dram() && index <= end_index_of_dram();
|
||||
}
|
||||
|
||||
// We have to make sure full collection copies all surviving objects to NV-DIMM.
|
||||
// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for full collection.
|
||||
// Note: by doing this we are breaking the invariant that the total number of committed regions is equal to the current heap size.
|
||||
// After the full collection ends, we will re-establish this invariant by freeing DRAM regions.
|
||||
void HeterogeneousHeapRegionManager::prepare_for_full_collection_start() {
|
||||
_total_commited_before_full_gc = total_regions_committed() - _no_borrowed_regions;
|
||||
_no_borrowed_regions = 0;
|
||||
expand_nvdimm(num_committed_dram(), NULL);
|
||||
remove_all_free_regions();
|
||||
}
|
||||
|
||||
// We need to bring the number of committed regions back to what it was before the full collection started.
|
||||
// Unless we are close to OOM, all regular (not pinned) regions in DRAM should be free.
|
||||
// We shrink all free regions in DRAM and, if needed, from NV-DIMM (when there are pinned DRAM regions).
|
||||
// If we can't bring the committed region count back to _total_commited_before_full_gc, we keep the extra count in _no_borrowed_regions.
|
||||
// When this GC finishes, new regions won't be allocated since has_borrowed_regions() is true. VM will be forced to re-try GC
|
||||
// with clear soft references followed by OOM error in worst case.
|
||||
void HeterogeneousHeapRegionManager::prepare_for_full_collection_end() {
|
||||
uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
|
||||
uint so_far = 0;
|
||||
uint idx_last_found = 0;
|
||||
uint num_last_found;
|
||||
uint end = (uint)_regions.length() - 1;
|
||||
while (so_far < shrink_size &&
|
||||
(num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
|
||||
uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
|
||||
uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
|
||||
so_far += to_uncommit;
|
||||
end = idx_last_found;
|
||||
}
|
||||
// See comment above the function.
|
||||
_no_borrowed_regions = shrink_size - so_far;
|
||||
}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::start_index_of_dram() const { return _max_regions;}
|
||||
|
||||
uint HeterogeneousHeapRegionManager::end_index_of_dram() const { return 2*_max_regions - 1; }
|
||||
|
||||
uint HeterogeneousHeapRegionManager::start_index_of_nvdimm() const { return 0; }
|
||||
|
||||
uint HeterogeneousHeapRegionManager::end_index_of_nvdimm() const { return _max_regions - 1; }
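Illustrative sketch (not from the patch): the region index layout implied by the four accessors above, assuming a hypothetical max_regions value; nv-dimm occupies the low half of the index space and dram the high half.

#include <cassert>

struct RegionLayout {
  unsigned max_regions;  // hypothetical: Xmx divided by the region size
  // nv-dimm: indexes [0, max_regions - 1]
  bool is_nvdimm(unsigned idx) const { return idx <= max_regions - 1; }
  // dram: indexes [max_regions, 2 * max_regions - 1]
  bool is_dram(unsigned idx) const { return idx >= max_regions && idx <= 2 * max_regions - 1; }
};

int main() {
  RegionLayout layout = {1024};
  assert(layout.is_nvdimm(0) && layout.is_nvdimm(1023));
  assert(layout.is_dram(1024) && layout.is_dram(2047));
  return 0;
}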
|
||||
|
||||
// This function is called when there are no free nv-dimm regions.
|
||||
// It borrows a region from the set of unavailable regions in nv-dimm for GC purpose.
|
||||
HeapRegion* HeterogeneousHeapRegionManager::borrow_old_region_for_gc() {
|
||||
assert(free_list_nvdimm_length() == 0, "this function should be called only when there are no nv-dimm regions in free list");
|
||||
|
||||
uint ret = expand_nvdimm(1, NULL);
|
||||
if(ret != 1) {
|
||||
return NULL;
|
||||
}
|
||||
HeapRegion* hr = _free_list.remove_region(true /*from_head*/);
|
||||
assert(is_in_nvdimm(hr->hrm_index()), "allocated region should be in nv-dimm");
|
||||
_no_borrowed_regions++;
|
||||
return hr;
|
||||
}
|
||||
|
||||
bool HeterogeneousHeapRegionManager::has_borrowed_regions() const {
|
||||
return _no_borrowed_regions > 0;
|
||||
}
|
150
src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp
Normal file
@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
|
||||
#define SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
|
||||
|
||||
#include "gc/g1/heapRegionManager.hpp"
|
||||
|
||||
// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
|
||||
// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
|
||||
// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
|
||||
// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
|
||||
// 1. The total number of regions committed in dram and nv-dimm equals the current size of heap.
|
||||
// 2. Consequently, total number of regions committed is less than or equal to Xmx.
|
||||
// 3. To maintain the guarantee stated by 1., whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
|
||||
// 3a. If more dram regions are needed (young generation expansion), corresponding number of regions in nv-dimm are un-committed.
|
||||
// 3b. When old generation or humongous set grows, and new regions need to be committed to nv-dimm, corresponding number of regions
|
||||
// are un-committed in dram.
|
||||
class HeterogeneousHeapRegionManager : public HeapRegionManager {
|
||||
const uint _max_regions;
|
||||
uint _max_dram_regions;
|
||||
uint _max_nvdimm_regions;
|
||||
uint _start_index_of_nvdimm;
|
||||
uint _total_commited_before_full_gc;
|
||||
uint _no_borrowed_regions;
|
||||
|
||||
uint total_regions_committed() const;
|
||||
uint num_committed_dram() const;
|
||||
uint num_committed_nvdimm() const;
|
||||
|
||||
// Similar to the find_unavailable_from_idx() function from the base class; the difference is this function searches in the range [start, end].
|
||||
uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
|
||||
|
||||
// Expand into dram. Maintains the invariant that total number of committed regions is less than current heap size.
|
||||
uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Expand into nv-dimm.
|
||||
uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Expand by finding unavailable regions in [start, end] range.
|
||||
uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Shrink dram set of regions.
|
||||
uint shrink_dram(uint num_regions, bool update_free_list = true);
|
||||
|
||||
// Shrink nv-dimm set of regions.
|
||||
uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
|
||||
|
||||
// Shrink regions from [start, end] range.
|
||||
uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
|
||||
|
||||
// Similar to find_empty_from_idx_reverse() in base class. Only here it searches in a range.
|
||||
uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
|
||||
|
||||
// Similar to find_contiguous() in base class, with [start, end] range
|
||||
uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
|
||||
|
||||
// This function is called when there are no free nv-dimm regions.
|
||||
// It borrows a region from the set of unavailable regions in nv-dimm for GC purpose.
|
||||
HeapRegion* borrow_old_region_for_gc();
|
||||
|
||||
uint free_list_dram_length() const;
|
||||
uint free_list_nvdimm_length() const;
|
||||
|
||||
// is region with given index in nv-dimm?
|
||||
bool is_in_nvdimm(uint index) const;
|
||||
bool is_in_dram(uint index) const;
|
||||
|
||||
public:
|
||||
|
||||
// Empty constructor, we'll initialize it with the initialize() method.
|
||||
HeterogeneousHeapRegionManager(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
|
||||
_max_nvdimm_regions(0), _start_index_of_nvdimm(0),
|
||||
_total_commited_before_full_gc(0), _no_borrowed_regions(0)
|
||||
{}
|
||||
|
||||
static HeterogeneousHeapRegionManager* manager();
|
||||
|
||||
virtual void initialize(G1RegionToSpaceMapper* heap_storage,
|
||||
G1RegionToSpaceMapper* prev_bitmap,
|
||||
G1RegionToSpaceMapper* next_bitmap,
|
||||
G1RegionToSpaceMapper* bot,
|
||||
G1RegionToSpaceMapper* cardtable,
|
||||
G1RegionToSpaceMapper* card_counts);
|
||||
|
||||
uint start_index_of_nvdimm() const;
|
||||
uint start_index_of_dram() const;
|
||||
uint end_index_of_nvdimm() const;
|
||||
uint end_index_of_dram() const;
|
||||
|
||||
// Override.
|
||||
HeapRegion* get_dummy_region();
|
||||
|
||||
// Adjust dram_set to provision 'expected_num_regions' regions.
|
||||
void adjust_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Prepare heap regions before and after full collection.
|
||||
void prepare_for_full_collection_start();
|
||||
void prepare_for_full_collection_end();
|
||||
|
||||
virtual HeapRegion* allocate_free_region(HeapRegionType type);
|
||||
|
||||
// Return maximum number of regions that heap can expand to.
|
||||
uint max_expandable_length() const;
|
||||
|
||||
// Override. Expand in nv-dimm.
|
||||
uint expand_by(uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Override.
|
||||
uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
|
||||
|
||||
// Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
|
||||
uint find_contiguous_only_empty(size_t num);
|
||||
|
||||
// Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
|
||||
uint find_contiguous_empty_or_unavailable(size_t num);
|
||||
|
||||
// Overrides base class implementation to find highest free region in dram.
|
||||
uint find_highest_free(bool* expanded);
|
||||
|
||||
// Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
|
||||
uint shrink_by(uint num_regions_to_remove);
|
||||
|
||||
bool has_borrowed_regions() const;
|
||||
|
||||
void verify();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_HETEROGENEOUSHEAPREGIONMANAGER_HPP
|
@ -53,7 +53,7 @@
|
||||
nonstatic_field(HeapRegionManager, _num_committed, uint) \
|
||||
\
|
||||
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
|
||||
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
|
||||
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
|
||||
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
|
||||
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
|
||||
nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/adjoiningGenerations.hpp"
|
||||
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
|
||||
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
|
||||
#include "gc/parallel/generationSizer.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
@ -40,8 +41,8 @@
|
||||
AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
|
||||
GenerationSizer* policy,
|
||||
size_t alignment) :
|
||||
_virtual_spaces(old_young_rs, policy->min_old_size(),
|
||||
policy->min_young_size(), alignment) {
|
||||
_virtual_spaces(new AdjoiningVirtualSpaces(old_young_rs, policy->min_old_size(),
|
||||
policy->min_young_size(), alignment)) {
|
||||
size_t init_low_byte_size = policy->initial_old_size();
|
||||
size_t min_low_byte_size = policy->min_old_size();
|
||||
size_t max_low_byte_size = policy->max_old_size();
|
||||
@ -61,21 +62,21 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
|
||||
// generation.
|
||||
|
||||
// Does the actual creation of the virtual spaces
|
||||
_virtual_spaces.initialize(max_low_byte_size,
|
||||
init_low_byte_size,
|
||||
init_high_byte_size);
|
||||
_virtual_spaces->initialize(max_low_byte_size,
|
||||
init_low_byte_size,
|
||||
init_high_byte_size);
|
||||
|
||||
// Place the young gen at the high end. Passes in the virtual space.
|
||||
_young_gen = new ASPSYoungGen(_virtual_spaces.high(),
|
||||
_virtual_spaces.high()->committed_size(),
|
||||
_young_gen = new ASPSYoungGen(_virtual_spaces->high(),
|
||||
_virtual_spaces->high()->committed_size(),
|
||||
min_high_byte_size,
|
||||
_virtual_spaces.high_byte_size_limit());
|
||||
_virtual_spaces->high_byte_size_limit());
|
||||
|
||||
// Place the old gen at the low end. Passes in the virtual space.
|
||||
_old_gen = new ASPSOldGen(_virtual_spaces.low(),
|
||||
_virtual_spaces.low()->committed_size(),
|
||||
_old_gen = new ASPSOldGen(_virtual_spaces->low(),
|
||||
_virtual_spaces->low()->committed_size(),
|
||||
min_low_byte_size,
|
||||
_virtual_spaces.low_byte_size_limit(),
|
||||
_virtual_spaces->low_byte_size_limit(),
|
||||
"old", 1);
|
||||
|
||||
young_gen()->initialize_work();
|
||||
@ -92,8 +93,9 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
|
||||
} else {
|
||||
|
||||
// Layout the reserved space for the generations.
|
||||
// If OldGen is allocated on nv-dimm, we need to split the reservation (this is required for windows).
|
||||
ReservedSpace old_rs =
|
||||
virtual_spaces()->reserved_space().first_part(max_low_byte_size);
|
||||
virtual_spaces()->reserved_space().first_part(max_low_byte_size, policy->is_hetero_heap() /* split */);
|
||||
ReservedSpace heap_rs =
|
||||
virtual_spaces()->reserved_space().last_part(max_low_byte_size);
|
||||
ReservedSpace young_rs = heap_rs.first_part(max_high_byte_size);
|
||||
@ -117,6 +119,8 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs,
|
||||
}
|
||||
}
|
||||
|
||||
AdjoiningGenerations::AdjoiningGenerations() { }
|
||||
|
||||
size_t AdjoiningGenerations::reserved_byte_size() {
|
||||
return virtual_spaces()->reserved_space().size();
|
||||
}
|
||||
@ -279,3 +283,13 @@ void AdjoiningGenerations::adjust_boundary_for_young_gen_needs(size_t eden_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AdjoiningGenerations* AdjoiningGenerations::create_adjoining_generations(ReservedSpace old_young_rs,
|
||||
GenerationSizer* policy,
|
||||
size_t alignment) {
|
||||
if (policy->is_hetero_heap() && UseAdaptiveGCBoundary) {
|
||||
return new AdjoiningGenerationsForHeteroHeap(old_young_rs, policy, alignment);
|
||||
} else {
|
||||
return new AdjoiningGenerations(old_young_rs, policy, alignment);
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -43,27 +43,29 @@
|
||||
class AdjoiningGenerations : public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// The young generation and old generation, respectively
|
||||
PSYoungGen* _young_gen;
|
||||
PSOldGen* _old_gen;
|
||||
|
||||
// The spaces used by the two generations.
|
||||
AdjoiningVirtualSpaces _virtual_spaces;
|
||||
|
||||
// Move boundary up to expand old gen. Checks are made to
|
||||
// determine if the move can be done with specified limits.
|
||||
void request_old_gen_expansion(size_t desired_change_in_bytes);
|
||||
// Move boundary down to expand young gen.
|
||||
bool request_young_gen_expansion(size_t desired_change_in_bytes);
|
||||
|
||||
protected:
|
||||
// The young generation and old generation, respectively
|
||||
PSYoungGen* _young_gen;
|
||||
PSOldGen* _old_gen;
|
||||
|
||||
// The spaces used by the two generations.
|
||||
AdjoiningVirtualSpaces* _virtual_spaces;
|
||||
|
||||
public:
|
||||
AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
|
||||
AdjoiningGenerations();
|
||||
|
||||
// Accessors
|
||||
PSYoungGen* young_gen() { return _young_gen; }
|
||||
PSOldGen* old_gen() { return _old_gen; }
|
||||
|
||||
AdjoiningVirtualSpaces* virtual_spaces() { return &_virtual_spaces; }
|
||||
AdjoiningVirtualSpaces* virtual_spaces() { return _virtual_spaces; }
|
||||
|
||||
// Additional space is needed in the old generation. Check
|
||||
// the available space and attempt to move the boundary if more space
|
||||
@ -74,7 +76,9 @@ class AdjoiningGenerations : public CHeapObj<mtGC> {
|
||||
|
||||
// Return the total byte size of the reserved space
|
||||
// for the adjoining generations.
|
||||
size_t reserved_byte_size();
|
||||
};
|
||||
virtual size_t reserved_byte_size();
|
||||
|
||||
// Return new AdjoiningGenerations instance based on collector policy (specifically - whether heap is heterogeneous).
|
||||
static AdjoiningGenerations* create_adjoining_generations(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
|
||||
};
|
||||
#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONS_HPP
|
||||
|
@ -0,0 +1,260 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
|
||||
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
|
||||
#include "gc/parallel/generationSizer.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psFileBackedVirtualspace.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
// Create two virtual spaces (HeteroVirtualSpaces), low() on nv-dimm memory, high() on dram.
|
||||
// Create ASPSOldGen and ASPSYoungGen the same way as in the base class.
|
||||
|
||||
AdjoiningGenerationsForHeteroHeap::AdjoiningGenerationsForHeteroHeap(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) :
|
||||
_total_size_limit(policy->max_heap_byte_size()) {
|
||||
size_t init_old_byte_size = policy->initial_old_size();
|
||||
size_t min_old_byte_size = policy->min_old_size();
|
||||
size_t max_old_byte_size = policy->max_old_size();
|
||||
size_t init_young_byte_size = policy->initial_young_size();
|
||||
size_t min_young_byte_size = policy->min_young_size();
|
||||
size_t max_young_byte_size = policy->max_young_size();
|
||||
// Create HeteroVirtualSpaces, which is composed of non-overlapping virtual spaces.
|
||||
HeteroVirtualSpaces* hetero_virtual_spaces = new HeteroVirtualSpaces(old_young_rs, min_old_byte_size,
|
||||
min_young_byte_size, _total_size_limit, alignment);
|
||||
|
||||
assert(min_old_byte_size <= init_old_byte_size &&
|
||||
init_old_byte_size <= max_old_byte_size, "Parameter check");
|
||||
assert(min_young_byte_size <= init_young_byte_size &&
|
||||
init_young_byte_size <= max_young_byte_size, "Parameter check");
|
||||
|
||||
assert(UseAdaptiveGCBoundary, "Should be used only when UseAdaptiveGCBoundary is true");
|
||||
|
||||
// Initialize the virtual spaces. Then pass a virtual space to each generation
|
||||
// for initialization of the generation.
|
||||
|
||||
// Does the actual creation of the virtual spaces
|
||||
hetero_virtual_spaces->initialize(max_old_byte_size, init_old_byte_size, init_young_byte_size);
|
||||
|
||||
_young_gen = new ASPSYoungGen(hetero_virtual_spaces->high(),
|
||||
hetero_virtual_spaces->high()->committed_size() /* initial_size */,
|
||||
min_young_byte_size,
|
||||
hetero_virtual_spaces->max_young_size());
|
||||
|
||||
_old_gen = new ASPSOldGen(hetero_virtual_spaces->low(),
|
||||
hetero_virtual_spaces->low()->committed_size() /* initial_size */,
|
||||
min_old_byte_size,
|
||||
hetero_virtual_spaces->max_old_size(), "old", 1);
|
||||
|
||||
young_gen()->initialize_work();
|
||||
assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check");
|
||||
assert(old_young_rs.size() >= young_gen()->gen_size_limit(), "Consistency check");
|
||||
|
||||
old_gen()->initialize_work("old", 1);
|
||||
assert(old_gen()->reserved().byte_size() <= old_gen()->gen_size_limit(), "Consistency check");
|
||||
assert(old_young_rs.size() >= old_gen()->gen_size_limit(), "Consistency check");
|
||||
|
||||
_virtual_spaces = hetero_virtual_spaces;
|
||||
}
|
||||
|
||||
size_t AdjoiningGenerationsForHeteroHeap::required_reserved_memory(GenerationSizer* policy) {
|
||||
// This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
|
||||
size_t max_yg_size = policy->max_heap_byte_size() - policy->min_old_size();
|
||||
// This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
|
||||
size_t max_old_size = policy->max_heap_byte_size() - policy->min_young_size();
|
||||
|
||||
return max_yg_size + max_old_size;
|
||||
}
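Illustrative sketch (not from the patch): the reservation arithmetic above worked through with hypothetical sizes; the sum exceeds Xmx because the young and old virtual spaces do not overlap.

#include <cstdio>

int main() {
  const unsigned long long G = 1024ULL * 1024 * 1024;
  unsigned long long max_heap = 8 * G, min_old = 1 * G, min_young = 1 * G;  // hypothetical policy values
  unsigned long long max_yg_size  = max_heap - min_old;    // 7 GB: how far young gen can grow
  unsigned long long max_old_size = max_heap - min_young;  // 7 GB: how far old gen can grow
  // The reservation is the sum, which is larger than Xmx (8 GB).
  std::printf("reserved = %llu bytes\n", max_yg_size + max_old_size);       // 14 GB
  return 0;
}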
|
||||
|
||||
// We override this function since the size of the reserved space here is more than the heap size and
|
||||
// callers expect this function to return the heap size.
|
||||
size_t AdjoiningGenerationsForHeteroHeap::reserved_byte_size() {
|
||||
return total_size_limit();
|
||||
}
|
||||
|
||||
AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::HeteroVirtualSpaces(ReservedSpace rs, size_t min_old_byte_size, size_t min_yg_byte_size, size_t max_total_size, size_t alignment) :
|
||||
AdjoiningVirtualSpaces(rs, min_old_byte_size, min_yg_byte_size, alignment),
|
||||
_max_total_size(max_total_size),
|
||||
_min_old_byte_size(min_old_byte_size), _min_young_byte_size(min_yg_byte_size),
|
||||
_max_old_byte_size(_max_total_size - _min_young_byte_size),
|
||||
_max_young_byte_size(_max_total_size - _min_old_byte_size) {
|
||||
}
|
||||
|
||||
void AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::initialize(size_t initial_old_reserved_size, size_t init_old_byte_size,
|
||||
size_t init_young_byte_size) {
|
||||
|
||||
// This is the reserved space exclusively for old generation.
|
||||
ReservedSpace low_rs = _reserved_space.first_part(_max_old_byte_size, true);
|
||||
// Initially we only assign 'initial_old_reserved_size' of the reserved space to the old virtual space.
|
||||
low_rs = low_rs.first_part(initial_old_reserved_size);
|
||||
|
||||
// This is the reserved space exclusively for young generation.
|
||||
ReservedSpace high_rs = _reserved_space.last_part(_max_old_byte_size).first_part(_max_young_byte_size);
|
||||
|
||||
// Carve out 'initial_young_reserved_size' of reserved space.
|
||||
size_t initial_young_reserved_size = _max_total_size - initial_old_reserved_size;
|
||||
high_rs = high_rs.last_part(_max_young_byte_size - initial_young_reserved_size);
|
||||
|
||||
_low = new PSFileBackedVirtualSpace(low_rs, alignment(), AllocateOldGenAt);
|
||||
if (!static_cast <PSFileBackedVirtualSpace*>(_low)->initialize()) {
|
||||
vm_exit_during_initialization("Could not map space for old generation at given AllocateOldGenAt path");
|
||||
}
|
||||
|
||||
if (!_low->expand_by(init_old_byte_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for object heap");
|
||||
}
|
||||
|
||||
_high = new PSVirtualSpaceHighToLow(high_rs, alignment());
|
||||
if (!_high->expand_by(init_young_byte_size)) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for object heap");
|
||||
}
|
||||
}
|
||||
|
||||
// Since the virtual spaces are non-overlapping, there is no boundary as such.
|
||||
// We replicate the same behavior and maintain the same invariants as base class 'AdjoiningVirtualSpaces' by
|
||||
// increasing old generation size and decreasing young generation size by same amount.
|
||||
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
|
||||
size_t bytes_needed = change_in_bytes;
|
||||
size_t uncommitted_in_old = MIN2(old_vs()->uncommitted_size(), bytes_needed);
|
||||
bool old_expanded = false;
|
||||
|
||||
// 1. Try to expand old within its reserved space.
|
||||
if (uncommitted_in_old != 0) {
|
||||
if (!old_vs()->expand_by(uncommitted_in_old)) {
|
||||
return false;
|
||||
}
|
||||
old_expanded = true;
|
||||
bytes_needed -= uncommitted_in_old;
|
||||
if (bytes_needed == 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t bytes_to_add_in_old = 0;
|
||||
|
||||
// 2. Get uncommitted memory from Young virtualspace.
|
||||
size_t young_uncommitted = MIN2(young_vs()->uncommitted_size(), bytes_needed);
|
||||
if (young_uncommitted > 0) {
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() + young_uncommitted,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
bytes_needed -= young_uncommitted;
|
||||
bytes_to_add_in_old = young_uncommitted;
|
||||
}
|
||||
|
||||
// 3. Get committed memory from Young virtualspace
|
||||
if (bytes_needed > 0) {
|
||||
size_t shrink_size = align_down(bytes_needed, young_vs()->alignment());
|
||||
bool ret = young_vs()->shrink_by(shrink_size);
|
||||
assert(ret, "We should be able to shrink young space");
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() + shrink_size,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
|
||||
bytes_to_add_in_old += shrink_size;
|
||||
}
|
||||
|
||||
// 4. Increase size of old space
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() + bytes_to_add_in_old,
|
||||
old_vs()->special());
|
||||
if (!old_vs()->expand_by(bytes_to_add_in_old) && !old_expanded) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
assert(total_size_after == total_size_before, "should be equal");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Read comment for adjust_boundary_up()
|
||||
// Increase young generation size and decrease old generation size by same amount.
|
||||
bool AdjoiningGenerationsForHeteroHeap::HeteroVirtualSpaces::adjust_boundary_down(size_t change_in_bytes) {
|
||||
assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
|
||||
DEBUG_ONLY(size_t total_size_before = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
|
||||
size_t bytes_needed = change_in_bytes;
|
||||
size_t uncommitted_in_young = MIN2(young_vs()->uncommitted_size(), bytes_needed);
|
||||
bool young_expanded = false;
|
||||
|
||||
// 1. Try to expand young within its reserved space.
|
||||
if (uncommitted_in_young > 0) {
|
||||
if (!young_vs()->expand_by(uncommitted_in_young)) {
|
||||
return false;
|
||||
}
|
||||
young_expanded = true;
|
||||
bytes_needed -= uncommitted_in_young;
|
||||
if (bytes_needed == 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
size_t bytes_to_add_in_young = 0;
|
||||
|
||||
// 2. Get uncommitted memory from Old virtualspace.
|
||||
size_t old_uncommitted = MIN2(old_vs()->uncommitted_size(), bytes_needed);
|
||||
if (old_uncommitted > 0) {
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() - old_uncommitted,
|
||||
old_vs()->special());
|
||||
bytes_needed -= old_uncommitted;
|
||||
bytes_to_add_in_young = old_uncommitted;
|
||||
}
|
||||
|
||||
// 3. Get committed memory from Old virtualspace
|
||||
if (bytes_needed > 0) {
|
||||
size_t shrink_size = align_down(bytes_needed, old_vs()->alignment());
|
||||
bool ret = old_vs()->shrink_by(shrink_size);
|
||||
assert(ret, "We should be able to shrink young space");
|
||||
old_vs()->set_reserved(old_vs()->reserved_low_addr(),
|
||||
old_vs()->reserved_high_addr() - shrink_size,
|
||||
old_vs()->special());
|
||||
|
||||
bytes_to_add_in_young += shrink_size;
|
||||
}
|
||||
|
||||
assert(bytes_to_add_in_young <= change_in_bytes, "should not be more than requested size");
|
||||
// 4. Increase size of young space
|
||||
young_vs()->set_reserved(young_vs()->reserved_low_addr() - bytes_to_add_in_young,
|
||||
young_vs()->reserved_high_addr(),
|
||||
young_vs()->special());
|
||||
if (!young_vs()->expand_by(bytes_to_add_in_young) && !young_expanded) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DEBUG_ONLY(size_t total_size_after = young_vs()->reserved_size() + old_vs()->reserved_size());
|
||||
assert(total_size_after == total_size_before, "should be equal");
|
||||
|
||||
return true;
|
||||
}
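Illustrative sketch (not from the patch): the invariant that both adjust_boundary_up() and adjust_boundary_down() preserve for the non-overlapping spaces; bytes taken from one space are added to the other, so the combined reserved size never changes.

#include <cassert>

struct Spaces { unsigned long long young_reserved; unsigned long long old_reserved; };

// Move 'bytes' of reservation from the young space to the old space (the "boundary up" direction).
static void move_to_old(Spaces& s, unsigned long long bytes) {
  s.young_reserved -= bytes;
  s.old_reserved   += bytes;
}

int main() {
  Spaces s = {4ULL << 30, 4ULL << 30};    // hypothetical 4 GB each
  unsigned long long before = s.young_reserved + s.old_reserved;
  move_to_old(s, 512ULL << 20);           // shift 512 MB toward old
  assert(s.young_reserved + s.old_reserved == before);  // combined reservation unchanged
  return 0;
}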
|
||||
|
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
||||
#define SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
||||
|
||||
#include "gc/parallel/adjoiningGenerations.hpp"
|
||||
|
||||
class AdjoiningGenerationsForHeteroHeap : public AdjoiningGenerations {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
// Maximum total size of the generations. This is equal to the heap size specified by user.
|
||||
// When adjusting young and old generation sizes, we need to ensure that the sum of the generation sizes does not exceed this.
|
||||
size_t _total_size_limit;
|
||||
|
||||
size_t total_size_limit() const {
|
||||
return _total_size_limit;
|
||||
}
|
||||
|
||||
// HeteroVirtualSpaces creates non-overlapping virtual spaces. Here _low and _high do not share a reserved space, i.e. there is no boundary
|
||||
// separating the two virtual spaces.
|
||||
class HeteroVirtualSpaces : public AdjoiningVirtualSpaces {
|
||||
size_t _max_total_size;
|
||||
size_t _min_old_byte_size;
|
||||
size_t _min_young_byte_size;
|
||||
size_t _max_old_byte_size;
|
||||
size_t _max_young_byte_size;
|
||||
|
||||
// Internally we access the virtual spaces using these methods. It increases readability, since we are not really
|
||||
// dealing with adjoining virtual spaces separated by a boundary, as is the case in the base class.
|
||||
// Externally they are accessed using low() and high() methods of base class.
|
||||
PSVirtualSpace* young_vs() { return high(); }
|
||||
PSVirtualSpace* old_vs() { return low(); }
|
||||
|
||||
public:
|
||||
HeteroVirtualSpaces(ReservedSpace rs,
|
||||
size_t min_old_byte_size,
|
||||
size_t min_young_byte_size, size_t max_total_size,
|
||||
size_t alignment);
|
||||
|
||||
// Increase old generation size and decrease young generation size by same amount
|
||||
bool adjust_boundary_up(size_t size_in_bytes);
|
||||
// Increase young generation size and decrease old generation size by same amount
|
||||
bool adjust_boundary_down(size_t size_in_bytes);
|
||||
|
||||
size_t max_young_size() const { return _max_young_byte_size; }
|
||||
size_t max_old_size() const { return _max_old_byte_size; }
|
||||
|
||||
void initialize(size_t initial_old_reserved_size, size_t init_low_byte_size,
|
||||
size_t init_high_byte_size);
|
||||
};
|
||||
|
||||
public:
|
||||
AdjoiningGenerationsForHeteroHeap(ReservedSpace rs, GenerationSizer* policy, size_t alignment);
|
||||
|
||||
// Given the size policy, calculate the total amount of memory that needs to be reserved.
|
||||
// We need to reserve more memory than Xmx, since we use non-overlapping virtual spaces for the young and old generations.
|
||||
static size_t required_reserved_memory(GenerationSizer* policy);
|
||||
|
||||
// Return the total byte size of the reserved space
|
||||
size_t reserved_byte_size();
|
||||
};
|
||||
#endif // SHARE_VM_GC_PARALLEL_ADJOININGGENERATIONSFORHETEROHEAP_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -59,7 +59,8 @@
|
||||
// moved up consistently. AdjoiningVirtualSpaces provide the
|
||||
// interfaces for moving the this boundary.
|
||||
|
||||
class AdjoiningVirtualSpaces {
|
||||
class AdjoiningVirtualSpaces : public CHeapObj<mtGC> {
|
||||
protected:
|
||||
// space at the high end and the low end, respectively
|
||||
PSVirtualSpace* _high;
|
||||
PSVirtualSpace* _low;
|
||||
@ -84,17 +85,17 @@ class AdjoiningVirtualSpaces {
|
||||
size_t alignment);
|
||||
|
||||
// accessors
|
||||
PSVirtualSpace* high() { return _high; }
|
||||
PSVirtualSpace* low() { return _low; }
|
||||
virtual PSVirtualSpace* high() { return _high; }
|
||||
virtual PSVirtualSpace* low() { return _low; }
|
||||
ReservedSpace reserved_space() { return _reserved_space; }
|
||||
size_t min_low_byte_size() { return _min_low_byte_size; }
|
||||
size_t min_high_byte_size() { return _min_high_byte_size; }
|
||||
size_t alignment() const { return _alignment; }
|
||||
|
||||
// move boundary between the two spaces up
|
||||
bool adjust_boundary_up(size_t size_in_bytes);
|
||||
virtual bool adjust_boundary_up(size_t size_in_bytes);
|
||||
// and down
|
||||
bool adjust_boundary_down(size_t size_in_bytes);
|
||||
virtual bool adjust_boundary_down(size_t size_in_bytes);
|
||||
|
||||
// Maximum byte size for the high space.
|
||||
size_t high_byte_size_limit() {
|
||||
@ -107,9 +108,8 @@ class AdjoiningVirtualSpaces {
|
||||
|
||||
// Sets the boundaries for the virtual spaces and commits and
|
||||
// initial size;
|
||||
void initialize(size_t max_low_byte_size,
|
||||
virtual void initialize(size_t max_low_byte_size,
|
||||
size_t init_low_byte_size,
|
||||
size_t init_high_byte_size);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_PARALLEL_ADJOININGVIRTUALSPACES_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -67,3 +67,11 @@ void GenerationSizer::initialize_size_info() {
|
||||
}
|
||||
GenCollectorPolicy::initialize_size_info();
|
||||
}
|
||||
|
||||
bool GenerationSizer::is_hetero_heap() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t GenerationSizer::heap_reserved_size_bytes() const {
|
||||
return _max_heap_byte_size;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,7 +32,6 @@
|
||||
|
||||
class GenerationSizer : public GenCollectorPolicy {
|
||||
private:
|
||||
|
||||
// The alignment used for boundary between young gen and old gen
|
||||
static size_t default_gen_alignment() { return 64 * K * HeapWordSize; }
|
||||
|
||||
@ -41,5 +40,9 @@ class GenerationSizer : public GenCollectorPolicy {
|
||||
void initialize_alignments();
|
||||
void initialize_flags();
|
||||
void initialize_size_info();
|
||||
|
||||
public:
|
||||
virtual size_t heap_reserved_size_bytes() const;
|
||||
virtual bool is_hetero_heap() const;
|
||||
};
|
||||
#endif // SHARE_VM_GC_PARALLEL_GENERATIONSIZER_HPP
|
||||
|
106
src/hotspot/share/gc/parallel/heterogeneousGenerationSizer.cpp
Normal file
@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/heterogeneousGenerationSizer.hpp"
|
||||
#include "gc/shared/collectorPolicy.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
const double HeterogeneousGenerationSizer::MaxRamFractionForYoung = 0.8;
|
||||
|
||||
// Check the available dram memory to limit NewSize and MaxNewSize before
|
||||
// calling base class initialize_flags().
|
||||
void HeterogeneousGenerationSizer::initialize_flags() {
|
||||
FormatBuffer<100> calc_str("");
|
||||
|
||||
julong phys_mem;
|
||||
// If MaxRAM is specified, we use that as the maximum physical memory available.
|
||||
if (FLAG_IS_DEFAULT(MaxRAM)) {
|
||||
phys_mem = os::physical_memory();
|
||||
calc_str.append("Physical_Memory");
|
||||
} else {
|
||||
phys_mem = (julong)MaxRAM;
|
||||
calc_str.append("MaxRAM");
|
||||
}
|
||||
|
||||
julong reasonable_max = phys_mem;
|
||||
|
||||
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
|
||||
// reasonable max size of young generation.
|
||||
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
|
||||
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
|
||||
calc_str.append(" / MaxRAMFraction");
|
||||
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
|
||||
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
|
||||
calc_str.append(" * MaxRAMPercentage / 100");
|
||||
} else {
|
||||
// We use our own fraction to calculate max size of young generation.
|
||||
reasonable_max = phys_mem * MaxRamFractionForYoung;
|
||||
calc_str.append(" * %0.2f", MaxRamFractionForYoung);
|
||||
}
|
||||
reasonable_max = align_up(reasonable_max, _gen_alignment);
|
||||
|
||||
if (MaxNewSize > reasonable_max) {
|
||||
if (FLAG_IS_CMDLINE(MaxNewSize)) {
|
||||
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
|
||||
(size_t)reasonable_max, calc_str.buffer());
|
||||
} else {
|
||||
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
|
||||
"Dram usage can be lowered by setting MaxNewSize to a lower value", (size_t)reasonable_max, calc_str.buffer());
|
||||
}
|
||||
MaxNewSize = reasonable_max;
|
||||
}
|
||||
if (NewSize > reasonable_max) {
|
||||
if (FLAG_IS_CMDLINE(NewSize)) {
|
||||
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
|
||||
(size_t)reasonable_max, calc_str.buffer());
|
||||
}
|
||||
NewSize = reasonable_max;
|
||||
}
|
||||
|
||||
// After setting new size flags, call base class initialize_flags()
|
||||
GenerationSizer::initialize_flags();
|
||||
}
|
||||
|
||||
bool HeterogeneousGenerationSizer::is_hetero_heap() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t HeterogeneousGenerationSizer::heap_reserved_size_bytes() const {
|
||||
if (UseAdaptiveGCBoundary) {
|
||||
// This is the size that young gen can grow to, when UseAdaptiveGCBoundary is true.
|
||||
size_t max_yg_size = _max_heap_byte_size - _min_old_size;
|
||||
// This is the size that old gen can grow to, when UseAdaptiveGCBoundary is true.
|
||||
size_t max_old_size = _max_heap_byte_size - _min_young_size;
|
||||
|
||||
return max_yg_size + max_old_size;
|
||||
} else {
|
||||
return _max_heap_byte_size;
|
||||
}
|
||||
}
|
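The DRAM-based young-generation cap above can be tried outside HotSpot. The following is a minimal standalone sketch (not the JDK code): the 0.8 default and the flag precedence mirror the patch, while align_up(), the parameter names, and the "<= 0 means not set" convention are local assumptions.

// Standalone sketch of the young-gen cap calculation; all names are illustrative.
#include <cstdint>
#include <cstdio>

static uint64_t align_up(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

// fraction <= 0 and percentage <= 0 mean "flag not set".
static uint64_t reasonable_young_max(uint64_t phys_mem, int max_ram_fraction,
                                     double max_ram_percentage, uint64_t gen_alignment) {
  const double kMaxRamFractionForYoung = 0.8;  // default used when neither flag is set
  uint64_t reasonable_max;
  if (max_ram_fraction > 0) {
    reasonable_max = phys_mem / (uint64_t)max_ram_fraction;
  } else if (max_ram_percentage > 0.0) {
    reasonable_max = (uint64_t)(phys_mem * max_ram_percentage / 100.0);
  } else {
    reasonable_max = (uint64_t)(phys_mem * kMaxRamFractionForYoung);
  }
  return align_up(reasonable_max, gen_alignment);
}

int main() {
  // 16 GiB of DRAM, neither flag set, 2 MiB generation alignment.
  printf("%llu\n", (unsigned long long)reasonable_young_max(16ULL << 30, 0, 0.0, 2ULL << 20));
  return 0;
}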
@ -0,0 +1,46 @@

/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 */

#ifndef SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
#define SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP

#include "gc/parallel/generationSizer.hpp"

// There is a nice batch of tested generation sizing code in
// GenCollectorPolicy. Let's reuse it!

class HeterogeneousGenerationSizer : public GenerationSizer {
private:
  // Max fraction of dram to use for young generation when MaxRAMFraction and
  // MaxRAMPercentage are not specified on the command line.
  static const double MaxRamFractionForYoung;

protected:
  virtual void initialize_flags();

public:
  virtual size_t heap_reserved_size_bytes() const;
  virtual bool is_hetero_heap() const;
};

#endif // SHARE_VM_GC_PARALLEL_HETEROGENEOUSGENERATIONSIZER_HPP
@ -24,6 +24,7 @@
 */

#include "precompiled.hpp"
#include "gc/parallel/heterogeneousGenerationSizer.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
@ -93,5 +94,9 @@ void ParallelArguments::initialize() {
}

CollectedHeap* ParallelArguments::create_heap() {
  return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
  if (AllocateOldGenAt != NULL) {
    return create_heap_with_policy<ParallelScavengeHeap, HeterogeneousGenerationSizer>();
  } else {
    return create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
  }
}
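For readers outside HotSpot, the selection pattern in create_heap() above boils down to picking one of two policy types based on whether an old-gen path was supplied. A rough sketch with standard-library stand-ins (only the two class names are taken from the patch; everything else is assumed):

#include <memory>

struct GenerationSizer {
  virtual ~GenerationSizer() = default;
  virtual bool is_hetero_heap() const { return false; }
};

struct HeterogeneousGenerationSizer : GenerationSizer {
  bool is_hetero_heap() const override { return true; }
};

// Pick the sizing policy depending on whether an AllocateOldGenAt-style path was given.
std::unique_ptr<GenerationSizer> make_sizer(const char* allocate_old_gen_at) {
  if (allocate_old_gen_at != nullptr) {
    return std::make_unique<HeterogeneousGenerationSizer>();
  }
  return std::make_unique<GenerationSizer>();
}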
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningGenerationsForHeteroHeap.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
@ -58,7 +59,7 @@ PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  const size_t heap_size = _collector_policy->max_heap_byte_size();
  size_t heap_size = _collector_policy->heap_reserved_size_bytes();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());

@ -86,7 +87,7 @@ jint ParallelScavengeHeap::initialize() {
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());
  _gens = AdjoiningGenerations::create_adjoining_generations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();
@ -104,7 +105,7 @@ jint ParallelScavengeHeap::initialize() {
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
  assert(_collector_policy->is_hetero_heap() || !UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
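The switch from max_heap_byte_size() to heap_reserved_size_bytes() above means the reservation can be larger than MaxHeapSize when the young/old boundary is allowed to move. A hedged, standalone sketch of that arithmetic (an assumed simplification, not the HotSpot implementation):

#include <cstddef>

// Illustrative only: reserved bytes for a heterogeneous heap with an adaptive
// young/old boundary. Each generation is reserved at its own maximum extent.
size_t reserved_size_bytes(size_t max_heap, size_t min_young, size_t min_old,
                           bool adaptive_gc_boundary) {
  if (adaptive_gc_boundary) {
    size_t max_young = max_heap - min_old;   // young can grow up to this
    size_t max_old   = max_heap - min_young; // old can grow up to this
    return max_young + max_old;              // >= max_heap; equal only if the minimums fill the heap
  }
  return max_heap;
}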
@ -111,6 +111,8 @@ class ParallelScavengeHeap : public CollectedHeap {

  virtual CollectorPolicy* collector_policy() const { return _collector_policy; }

  virtual GenerationSizer* ps_collector_policy() const { return _collector_policy; }

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual GrowableArray<GCMemoryManager*> memory_managers();

87
src/hotspot/share/gc/parallel/psFileBackedVirtualspace.cpp
Normal file
@ -0,0 +1,87 @@

/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 */

#include "precompiled.hpp"
|
||||
#include "gc/parallel/psFileBackedVirtualspace.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "runtime/os.inline.hpp"
|
||||
|
||||
PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* path) : PSVirtualSpace(rs, alignment),
|
||||
_file_path(path), _fd(-1), _mapping_succeeded(false) {
|
||||
assert(!rs.special(), "ReservedSpace passed to PSFileBackedVirtualSpace cannot be special");
|
||||
}
|
||||
|
||||
bool PSFileBackedVirtualSpace::initialize() {
|
||||
_fd = os::create_file_for_heap(_file_path);
|
||||
if (_fd == -1) {
|
||||
return false;
|
||||
}
|
||||
// We map the reserved space to a file at initialization.
|
||||
char* ret = os::replace_existing_mapping_with_file_mapping(reserved_low_addr(), reserved_size(), _fd);
|
||||
if (ret != reserved_low_addr()) {
|
||||
os::close(_fd);
|
||||
return false;
|
||||
}
|
||||
// _mapping_succeeded is false if we return before this point.
|
||||
// expand calls later check value of this flag and return error if it is false.
|
||||
_mapping_succeeded = true;
|
||||
_special = true;
|
||||
os::close(_fd);
|
||||
return true;
|
||||
}
|
||||
|
||||
PSFileBackedVirtualSpace::PSFileBackedVirtualSpace(ReservedSpace rs, const char* path) :
  // Delegate to the three-argument constructor, defaulting the alignment to the VM page size.
  PSFileBackedVirtualSpace(rs, os::vm_page_size(), path) {
}

bool PSFileBackedVirtualSpace::expand_by(size_t bytes) {
  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");

  // If mapping did not succeed during initialization, return false.
  if (!_mapping_succeeded) {
    return false;
  }
  return PSVirtualSpace::expand_by(bytes);
}

bool PSFileBackedVirtualSpace::shrink_by(size_t bytes) {
  assert(special(), "Since entire space is committed at initialization, _special should always be true for PSFileBackedVirtualSpace");
  return PSVirtualSpace::shrink_by(bytes);
}

size_t PSFileBackedVirtualSpace::expand_into(PSVirtualSpace* space, size_t bytes) {
  // Not supported, since doing this would change the page mapping and lead to large TLB penalties.
  assert(false, "expand_into() should not be called for PSFileBackedVirtualSpace");
  return 0;
}

void PSFileBackedVirtualSpace::release() {
  os::close(_fd);
  _fd = -1;
  _file_path = NULL;

  PSVirtualSpace::release();
}
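The initialize() path above goes through HotSpot's os:: layer; as a rough POSIX-only illustration (not the JDK implementation), replacing an existing reservation with a file-backed mapping can be sketched like this — the function name, flags, and error handling are assumptions:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Map `size` bytes of the file at `path` over an already reserved range at `reserved_addr`.
// Returns the mapped address on success, or nullptr on failure.
void* map_file_over_reservation(void* reserved_addr, size_t size, const char* path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd == -1) {
    return nullptr;
  }
  if (ftruncate(fd, (off_t)size) == -1) {
    close(fd);
    return nullptr;
  }
  // MAP_FIXED replaces whatever mapping currently occupies the reserved range.
  void* ret = mmap(reserved_addr, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_FIXED, fd, 0);
  close(fd);  // the mapping stays valid after the descriptor is closed
  return (ret == MAP_FAILED) ? nullptr : ret;
}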
@ -19,17 +19,28 @@
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// key: compiler.err.unicode.backtick
// key: compiler.misc.feature.raw.string.literals
// key: compiler.warn.preview.feature.use.plural
// options: --enable-preview -source 12 -Xlint:preview
#ifndef SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
#define SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP

class RawStringLiteral {
    String m() {
        return `abc` + \u0060`def`;
    }
}
#include "gc/parallel/psVirtualspace.hpp"

class PSFileBackedVirtualSpace : public PSVirtualSpace {
private:
  const char* _file_path;
  int _fd;
  bool _mapping_succeeded;
public:
  PSFileBackedVirtualSpace(ReservedSpace rs, size_t alignment, const char* file_path);
  PSFileBackedVirtualSpace(ReservedSpace rs, const char* file_path);

  bool initialize();
  bool expand_by(size_t bytes);
  bool shrink_by(size_t bytes);
  size_t expand_into(PSVirtualSpace* space, size_t bytes);
  void release();
};
#endif // SHARE_VM_GC_PARALLEL_PSFILEBACKEDVIRTUALSPACE_HPP
@ -27,6 +27,7 @@
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -71,7 +72,14 @@ void PSOldGen::initialize(ReservedSpace rs, size_t alignment,

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
    if (!(static_cast<PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
    }
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
@ -1995,7 +1995,10 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
  // We also return false when it's a heterogeneous heap, because the old generation cannot absorb data from eden
  // when it is allocated on different memory (for example, nv-dimm) than young.
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary) ||
      ParallelScavengeHeap::heap()->ps_collector_policy()->is_hetero_heap()) {
    return false;
  }

@ -275,11 +275,7 @@ public:
  template <typename T>
  static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                    size_t length) {
    return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                              dst_obj, dst_offset_in_bytes, dst_raw,
                              length);
  }
                                    size_t length);

  // Off-heap oop accesses. These accessors get resolved when
  // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
60
src/hotspot/share/gc/shared/barrierSet.inline.hpp
Normal file
@ -0,0 +1,60 @@

/*
 * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 */

#ifndef SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP

#include "gc/shared/barrierSet.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.hpp"

template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline bool BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                                                      size_t length) {
  T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
  T* dst = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

  if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
    // Covariant, copy without checks
    return Raw::oop_arraycopy(NULL, 0, src, NULL, 0, dst, length);
  }

  // Copy each element with checking casts
  Klass* const dst_klass = objArrayOop(dst_obj)->element_klass();
  for (const T* const end = src + length; src < end; src++, dst++) {
    const T elem = *src;
    if (!oopDesc::is_instanceof_or_null(CompressedOops::decode(elem), dst_klass)) {
      return false;
    }
    *dst = elem;
  }

  return true;
}

#endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
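The element-wise checked copy above follows a common pattern: copy until an element fails the store check, and report the failure so the caller can raise the appropriate exception. A simplified, type-generic sketch of the same control flow (plain C++, no oops or Klass machinery; names are illustrative):

#include <cstddef>

// Copy up to `length` elements, stopping at the first element the predicate rejects.
// Returns false on a rejected element (the copy is then intentionally partial).
template <typename T, typename Pred>
bool checked_copy(const T* src, T* dst, size_t length, Pred is_storable) {
  for (const T* end = src + length; src < end; ++src, ++dst) {
    if (!is_storable(*src)) {
      return false;
    }
    *dst = *src;
  }
  return true;
}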
@ -28,6 +28,7 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"

void GCArguments::initialize() {
@ -53,4 +54,28 @@ void GCArguments::initialize() {
    // If class unloading is disabled, also disable concurrent class unloading.
    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
  }

  if (!FLAG_IS_DEFAULT(AllocateOldGenAt)) {
    // CompressedOops not supported when AllocateOldGenAt is set.
    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
    // When AllocateOldGenAt is set, we cannot use large pages for the entire heap memory.
    // Only young gen, which is allocated in dram, could use large pages, but we currently don't support that.
    FLAG_SET_DEFAULT(UseLargePages, false);
  }
}

bool GCArguments::check_args_consistency() {
  bool status = true;
  if (!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) {
    jio_fprintf(defaultStream::error_stream(),
                "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
    status = false;
  }
  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
    jio_fprintf(defaultStream::error_stream(),
                "AllocateOldGenAt is not supported for selected GC.\n");
    status = false;
  }
  return status;
}
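A minimal sketch of the consistency rules enforced above, with plain booleans and strings standing in for the HotSpot flag machinery (the function and parameter names are assumptions, only the two error messages come from the patch):

#include <cstdio>

bool check_heap_path_flags(const char* allocate_heap_at, const char* allocate_old_gen_at,
                           bool use_serial_gc, bool use_cms_gc, bool use_epsilon_gc, bool use_z_gc) {
  bool status = true;
  if (allocate_heap_at != nullptr && allocate_old_gen_at != nullptr) {
    fprintf(stderr, "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
    status = false;
  }
  if (allocate_old_gen_at != nullptr && (use_serial_gc || use_cms_gc || use_epsilon_gc || use_z_gc)) {
    fprintf(stderr, "AllocateOldGenAt is not supported for selected GC.\n");
    status = false;
  }
  return status;
}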
@ -39,6 +39,7 @@ public:
  virtual void initialize();
  virtual size_t conservative_max_heap_alignment() = 0;
  virtual CollectedHeap* create_heap() = 0;
  static bool check_args_consistency();
};

#endif // SHARE_GC_SHARED_GCARGUMENTS_HPP
@ -983,7 +983,7 @@ void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node*
  Node* c = new ProjNode(call,TypeFunc::Control);
  c = igvn.transform(c);
  Node* m = new ProjNode(call, TypeFunc::Memory);
  c = igvn.transform(m);
  m = igvn.transform(m);

  Node* dest = ac->in(ArrayCopyNode::Dest);
  assert(dest->is_AddP(), "bad input");
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -189,6 +189,15 @@ LoadBarrierNode::LoadBarrierNode(Compile* C,
  bs->register_potential_barrier_node(this);
}

uint LoadBarrierNode::size_of() const {
  return sizeof(*this);
}

uint LoadBarrierNode::cmp(const Node& n) const {
  ShouldNotReachHere();
  return 0;
}

const Type *LoadBarrierNode::bottom_type() const {
  const Type** floadbarrier = (const Type **)(Compile::current()->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  Node* in_oop = in(Oop);
@ -198,6 +207,11 @@ const Type *LoadBarrierNode::bottom_type() const {
  return TypeTuple::make(Number_of_Outputs, floadbarrier);
}

const TypePtr* LoadBarrierNode::adr_type() const {
  ShouldNotReachHere();
  return NULL;
}

const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
  const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
  const Type* val_t = phase->type(in(Oop));
@ -441,6 +455,11 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;
}

uint LoadBarrierNode::match_edge(uint idx) const {
  ShouldNotReachHere();
  return 0;
}

void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
@ -1151,7 +1170,7 @@ static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
  if (lb->in(LoadBarrierNode::Oop)->is_Phi()) {
    Node* oop_phi = lb->in(LoadBarrierNode::Oop);

    if (oop_phi->in(2) == oop_phi) {
    if ((oop_phi->req() != 3) || (oop_phi->in(2) == oop_phi)) {
      // Ignore phis with only one input
      return false;
    }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -62,10 +62,14 @@ public:
                  bool oop_reload_allowed);

  virtual int Opcode() const;
  virtual uint size_of() const;
  virtual uint cmp(const Node& n) const;
  virtual const Type *bottom_type() const;
  virtual const TypePtr* adr_type() const;
  virtual const Type *Value(PhaseGVN *phase) const;
  virtual Node *Identity(PhaseGVN *phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  LoadBarrierNode* has_dominating_barrier(PhaseIdealLoop* phase,
                                          bool linear_only,
@ -19,7 +19,6 @@
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
@ -91,6 +90,9 @@ void ZArguments::initialize() {
  // Verification of stacks not (yet) supported, for the same reason
  // we need fixup_partial_loads
  DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));

  // Initialize platform specific arguments
  initialize_platform();
}

CollectedHeap* ZArguments::create_heap() {
@ -29,6 +29,9 @@
class CollectedHeap;

class ZArguments : public GCArguments {
private:
  void initialize_platform();

public:
  virtual void initialize();
  virtual size_t conservative_max_heap_alignment();
@ -42,6 +42,11 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  }

  if (nm->is_unloading()) {
    // We don't need to take the lock when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method(false /* acquire_lock */);

    // We can end up calling nmethods that are unloading
    // since we clear compiled ICs lazily. Returning false
    // will re-resolve the call and update the compiled IC.
@ -611,15 +611,20 @@ public:
      return;
    }

    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    if (nm->is_unloading()) {
      // Unlinking of the dependencies must happen before the
      // handshake separating unlink and purge.
      nm->flush_dependencies(false /* delete_immediately */);

      // We don't need to take the lock when unlinking nmethods from
      // the Method, because it is only concurrently unlinked by
      // the entry barrier, which acquires the per nmethod lock.
      nm->unlink_from_method(false /* acquire_lock */);
      return;
    }

    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));

    // Heal oops and disarm
    ZNMethodOopClosure cl;
    ZNMethodTable::entry_oops_do(entry, &cl);
@ -128,7 +128,8 @@ uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags fl
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm(), "Should be a Java or VM thread");
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();
@ -193,7 +194,8 @@ uintptr_t ZObjectAllocator::alloc_object(size_t size) {
}

uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_worker() || ZThread::is_vm(), "Unknown thread");
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
         "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
Some files were not shown because too many files have changed in this diff.