Merge

commit 87b7e052f2

@@ -309,3 +309,4 @@ e7dbbef69d12b6a74dfad331b7188e7f893e8d29 jdk9-b62
8ffdeabc7c2b9a8280bf46cae026ac46b4d31c26 jdk9-b64
4915246064b2f89d5f00c96e758686b7fdad36a6 jdk9-b65
ff3fc75f3214ad7e03595be1b0d0f38d887b6f0e jdk9-b66
56166ce66037952fa21e9f680b31bf8eb47312c0 jdk9-b67

@@ -309,3 +309,4 @@ ea38728b4f4bdd8fd0d7a89b18069f521cf05013 jdk9-b61
82cf9aab9a83e41c8194ba01af9666afdb856cbe jdk9-b64
7c31f9d7b932f7924f1258d52885b1c7c3e078c2 jdk9-b65
dc6e8336f51bb6b67b7245766179eab5ca7720b4 jdk9-b66
f546760134eb861fcfecd4ce611b0040b0d25a6a jdk9-b67

@@ -338,14 +338,16 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# no adjustment
;;
slowdebug )
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
# Add runtime stack smashing and undefined behavior checks.
# Not all versions of gcc support -fstack-protector
STACK_PROTECTOR_CFLAG="-fstack-protector-all"
FLAGS_COMPILER_CHECK_ARGUMENTS([$STACK_PROTECTOR_CFLAG], [], [STACK_PROTECTOR_CFLAG=""])

CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
;;
esac
fi
AC_SUBST(CFLAGS_DEBUG_OPTIONS)
AC_SUBST(CXXFLAGS_DEBUG_OPTIONS)

# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then

@@ -718,8 +718,6 @@ C_O_FLAG_DEBUG
C_O_FLAG_NORM
C_O_FLAG_HI
C_O_FLAG_HIGHEST
CXXFLAGS_DEBUG_OPTIONS
CFLAGS_DEBUG_OPTIONS
CXXFLAGS_DEBUG_SYMBOLS
CFLAGS_DEBUG_SYMBOLS
CXX_FLAG_DEPS

@@ -4366,7 +4364,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1432629750
DATE_WHEN_GENERATED=1433337614

###############################################################################
#

@@ -41837,14 +41835,80 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
# no adjustment
;;
slowdebug )
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
;;
esac
# Add runtime stack smashing and undefined behavior checks.
# Not all versions of gcc support -fstack-protector
STACK_PROTECTOR_CFLAG="-fstack-protector-all"

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"" >&5
$as_echo_n "checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"... " >&6; }
supports=yes

saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS $STACK_PROTECTOR_CFLAG"
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int i;
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :

else
supports=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

CFLAGS="$saved_cflags"

saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG $STACK_PROTECTOR_CFLAG"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int i;
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :

else
supports=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

CXXFLAGS="$saved_cxxflags"

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5
$as_echo "$supports" >&6; }
if test "x$supports" = "xyes" ; then
:
else
STACK_PROTECTOR_CFLAG=""
fi


CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
;;
esac
fi

# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then

@@ -309,3 +309,4 @@ d27f7e0a7aca129969de23e9934408a31b4abf4c jdk9-b62
0a5e5a7c3539e8bde73d9fe55750e49a49cb8dac jdk9-b64
afc1e295c4bf83f9a5dd539c29914edd4a754a3f jdk9-b65
44ee68f7dbacab24a45115fd6a8ccdc7eb6e8f0b jdk9-b66
4418697e56f1f43597f55c7cb6573549c6117868 jdk9-b67

@@ -469,3 +469,4 @@ ee878f3d6732856f7725c590312bfbe2ffa52cc7 jdk9-b58
bf92b8db249cdfa5651ef954b6c0743a7e0ea4cd jdk9-b64
e7ae94c4f35e940ea423fc1dd260435df34a77c0 jdk9-b65
197e94e0dacddd16816f101d24fc0442ab518326 jdk9-b66
d47dfabd16d48eb96a451edd1b61194a39ee0eb5 jdk9-b67

@@ -2813,6 +2813,13 @@ void Assembler::orl(Register dst, Register src) {
emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::orl(Address dst, Register src) {
InstructionMark im(this);
prefix(dst, src);
emit_int8(0x09);
emit_operand(src, dst);
}

void Assembler::packuswb(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");

@@ -6907,6 +6914,19 @@ void Assembler::rclq(Register dst, int imm8) {
}
}

void Assembler::rcrq(Register dst, int imm8) {
assert(isShiftCount(imm8 >> 1), "illegal shift count");
int encode = prefixq_and_encode(dst->encoding());
if (imm8 == 1) {
emit_int8((unsigned char)0xD1);
emit_int8((unsigned char)(0xD8 | encode));
} else {
emit_int8((unsigned char)0xC1);
emit_int8((unsigned char)(0xD8 | encode));
emit_int8(imm8);
}
}

void Assembler::rorq(Register dst, int imm8) {
assert(isShiftCount(imm8 >> 1), "illegal shift count");
int encode = prefixq_and_encode(dst->encoding());

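The new rcrq encoding above emits a rotate-right-through-carry, which the BigInteger stubs later in this change use to shift a multi-word value right by one bit. A minimal C++ model of the single-bit form (an illustrative sketch, not HotSpot code):

    #include <cstdint>

    // Model of RCR-by-1: the carry flag rotates into bit 63 and the old bit 0
    // becomes the new carry, which is the behavior rcrq(reg, 1) encodes.
    static uint64_t rcr1(uint64_t value, bool& carry_flag) {
      bool shifted_out = (value & 1) != 0;
      value = (value >> 1) | (uint64_t(carry_flag) << 63);
      carry_flag = shifted_out;
      return value;
    }
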
@@ -1594,6 +1594,7 @@ private:
void orl(Register dst, int32_t imm32);
void orl(Register dst, Address src);
void orl(Register dst, Register src);
void orl(Address dst, Register src);

void orq(Address dst, int32_t imm32);
void orq(Register dst, int32_t imm32);

@@ -1694,6 +1695,8 @@ private:

void rclq(Register dst, int imm8);

void rcrq(Register dst, int imm8);

void rdtsc();

void ret(int imm16);

@@ -7750,6 +7750,503 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
pop(tmp2);
pop(tmp1);
}

//Helper functions for square_to_len()

/**
* Store the squares of x[], right shifted one bit (divided by 2) into z[]
* Preserves x and z and modifies rest of the registers.
*/

void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
// Perform square and right shift by 1
// Handle odd xlen case first, then for even xlen do the following
// jlong carry = 0;
// for (int j=0, i=0; j < xlen; j+=2, i+=4) {
// huge_128 product = x[j:j+1] * x[j:j+1];
// z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
// z[i+2:i+3] = (jlong)(product >>> 1);
// carry = (jlong)product;
// }

xorq(tmp5, tmp5); // carry
xorq(rdxReg, rdxReg);
xorl(tmp1, tmp1); // index for x
xorl(tmp4, tmp4); // index for z

Label L_first_loop, L_first_loop_exit;

testl(xlen, 1);
jccb(Assembler::zero, L_first_loop); //jump if xlen is even

// Square and right shift by 1 the odd element using 32 bit multiply
movl(raxReg, Address(x, tmp1, Address::times_4, 0));
imulq(raxReg, raxReg);
shrq(raxReg, 1);
adcq(tmp5, 0);
movq(Address(z, tmp4, Address::times_4, 0), raxReg);
incrementl(tmp1);
addl(tmp4, 2);

// Square and right shift by 1 the rest using 64 bit multiply
bind(L_first_loop);
cmpptr(tmp1, xlen);
jccb(Assembler::equal, L_first_loop_exit);

// Square
movq(raxReg, Address(x, tmp1, Address::times_4, 0));
rorq(raxReg, 32); // convert big-endian to little-endian
mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax

// Right shift by 1 and save carry
shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
rcrq(rdxReg, 1);
rcrq(raxReg, 1);
adcq(tmp5, 0);

// Store result in z
movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
movq(Address(z, tmp4, Address::times_4, 8), raxReg);

// Update indices for x and z
addl(tmp1, 2);
addl(tmp4, 4);
jmp(L_first_loop);

bind(L_first_loop_exit);
}

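The loop above follows the Java-style pseudocode in its comment. A word-level C++ sketch of the same computation, using the GCC/Clang unsigned __int128 extension in place of rdx:rax and assuming a flat little-endian array of 64-bit words for clarity (the real stub works on a big-endian int[]):

    #include <cstdint>

    // Square each word and store the 128-bit product shifted right by one bit,
    // threading the shifted-out low bit through `carry`, as the comment describes.
    static void square_rshift_model(const uint64_t* x, int xlen, uint64_t* z) {
      uint64_t carry = 0;
      for (int j = 0, i = 0; j < xlen; j++, i += 2) {
        unsigned __int128 product = (unsigned __int128)x[j] * x[j];
        z[i]     = (carry << 63) | (uint64_t)(product >> 65);  // high half
        z[i + 1] = (uint64_t)(product >> 1);                   // low half
        carry    = (uint64_t)product & 1;                      // bit shifted out
      }
    }
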

/**
* Perform the following multiply add operation using BMI2 instructions
* carry:sum = sum + op1*op2 + carry
* op2 should be in rdx
* op2 is preserved, all other registers are modified
*/
void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
// assert op2 is rdx
mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
addq(sum, carry);
adcq(tmp2, 0);
addq(sum, op1);
adcq(tmp2, 0);
movq(carry, tmp2);
}

/**
* Perform the following multiply add operation:
* carry:sum = sum + op1*op2 + carry
* Preserves op1, op2 and modifies rest of registers
*/
void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
// rdx:rax = op1 * op2
movq(raxReg, op2);
mulq(op1);

// rdx:rax = sum + carry + rdx:rax
addq(sum, carry);
adcq(rdxReg, 0);
addq(sum, raxReg);
adcq(rdxReg, 0);

// carry:sum = rdx:sum
movq(carry, rdxReg);
}

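Both helpers compute the same primitive, carry:sum = sum + op1*op2 + carry. A C++ model with a 128-bit intermediate (a sketch assuming the GCC/Clang __int128 extension); the total cannot overflow, since (2^64-1)^2 + 2*(2^64-1) = 2^128 - 1:

    #include <cstdint>

    // carry:sum = sum + op1*op2 + carry, the exact contract documented above.
    static void multiply_add_64_model(uint64_t& sum, uint64_t op1, uint64_t op2,
                                      uint64_t& carry) {
      unsigned __int128 t = (unsigned __int128)op1 * op2 + sum + carry;
      sum   = (uint64_t)t;          // low 64 bits
      carry = (uint64_t)(t >> 64);  // high 64 bits
    }
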

/**
* Add 64 bit long carry into z[] with carry propagation.
* Preserves z and carry register values and modifies rest of registers.
*
*/
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
Label L_fourth_loop, L_fourth_loop_exit;

movl(tmp1, 1);
subl(zlen, 2);
addq(Address(z, zlen, Address::times_4, 0), carry);

bind(L_fourth_loop);
jccb(Assembler::carryClear, L_fourth_loop_exit);
subl(zlen, 2);
jccb(Assembler::negative, L_fourth_loop_exit);
addq(Address(z, zlen, Address::times_4, 0), tmp1);
jmp(L_fourth_loop);
bind(L_fourth_loop_exit);
}

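The same fourth-loop logic in C++ form: add the 64-bit carry at the current position, then ripple a +1 toward more significant words while the addition keeps carrying out. A sketch that treats index 0 as the most significant word, matching the big-endian layout:

    #include <cstdint>

    static void add_one_64_model(uint64_t* z, int idx, uint64_t carry) {
      unsigned __int128 t = (unsigned __int128)z[idx] + carry;
      z[idx] = (uint64_t)t;
      while ((t >> 64) != 0 && --idx >= 0) {  // carry out? propagate +1 upward
        t = (unsigned __int128)z[idx] + 1;
        z[idx] = (uint64_t)t;
      }
    }
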

/**
* Shift z[] left by 1 bit.
* Preserves x, len, z and zlen registers and modifies rest of the registers.
*
*/
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

Label L_fifth_loop, L_fifth_loop_exit;

// Fifth loop
// Perform primitiveLeftShift(z, zlen, 1)

const Register prev_carry = tmp1;
const Register new_carry = tmp4;
const Register value = tmp2;
const Register zidx = tmp3;

// int zidx, carry;
// long value;
// carry = 0;
// for (zidx = zlen-2; zidx >=0; zidx -= 2) {
// (carry:value) = (z[i] << 1) | carry ;
// z[i] = value;
// }

movl(zidx, zlen);
xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

bind(L_fifth_loop);
decl(zidx); // Use decl to preserve carry flag
decl(zidx);
jccb(Assembler::negative, L_fifth_loop_exit);

if (UseBMI2Instructions) {
movq(value, Address(z, zidx, Address::times_4, 0));
rclq(value, 1);
rorxq(value, value, 32);
movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
}
else {
// clear new_carry
xorl(new_carry, new_carry);

// Shift z[i] by 1, or in previous carry and save new carry
movq(value, Address(z, zidx, Address::times_4, 0));
shlq(value, 1);
adcl(new_carry, 0);

orq(value, prev_carry);
rorq(value, 0x20);
movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form

// Set previous carry = new carry
movl(prev_carry, new_carry);
}
jmp(L_fifth_loop);

bind(L_fifth_loop_exit);
}

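A compact C++ model of this fifth loop: shift the whole number left one bit, carrying the top bit of each word into the next word up (again with word 0 most significant, as in the stub's layout):

    #include <cstdint>

    static void lshift_by_1_model(uint64_t* z, int zlen) {
      uint64_t carry = 0;                    // bit shifted out of the word below
      for (int i = zlen - 1; i >= 0; i--) {  // least significant word first
        uint64_t new_carry = z[i] >> 63;
        z[i] = (z[i] << 1) | carry;
        carry = new_carry;
      }
    }
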

/**
* Code for BigInteger::squareToLen() intrinsic
*
* rdi: x
* rsi: len
* r8: z
* rcx: zlen
* r12: tmp1
* r13: tmp2
* r14: tmp3
* r15: tmp4
* rbx: tmp5
*
*/
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, fifth_loop, fifth_loop_exit, L_last_x, L_multiply;
push(tmp1);
push(tmp2);
push(tmp3);
push(tmp4);
push(tmp5);

// First loop
// Store the squares, right shifted one bit (i.e., divided by 2).
square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

// Add in off-diagonal sums.
//
// Second, third (nested) and fourth loops.
// zlen +=2;
// for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
// carry = 0;
// long op2 = x[xidx:xidx+1];
// for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
// k -= 2;
// long op1 = x[j:j+1];
// long sum = z[k:k+1];
// carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
// z[k:k+1] = sum;
// }
// add_one_64(z, k, carry, tmp_regs);
// }

const Register carry = tmp5;
const Register sum = tmp3;
const Register op1 = tmp4;
Register op2 = tmp2;

push(zlen);
push(len);
addl(zlen,2);
bind(L_second_loop);
xorq(carry, carry);
subl(zlen, 4);
subl(len, 2);
push(zlen);
push(len);
cmpl(len, 0);
jccb(Assembler::lessEqual, L_second_loop_exit);

// Multiply an array by one 64 bit long.
if (UseBMI2Instructions) {
op2 = rdxReg;
movq(op2, Address(x, len, Address::times_4, 0));
rorxq(op2, op2, 32);
}
else {
movq(op2, Address(x, len, Address::times_4, 0));
rorq(op2, 32);
}

bind(L_third_loop);
decrementl(len);
jccb(Assembler::negative, L_third_loop_exit);
decrementl(len);
jccb(Assembler::negative, L_last_x);

movq(op1, Address(x, len, Address::times_4, 0));
rorq(op1, 32);

bind(L_multiply);
subl(zlen, 2);
movq(sum, Address(z, zlen, Address::times_4, 0));

// Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry.
if (UseBMI2Instructions) {
multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
}
else {
multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
}

movq(Address(z, zlen, Address::times_4, 0), sum);

jmp(L_third_loop);
bind(L_third_loop_exit);

// Fourth loop
// Add 64 bit long carry into z with carry propagation.
// Uses offsetted zlen.
add_one_64(z, zlen, carry, tmp1);

pop(len);
pop(zlen);
jmp(L_second_loop);

// Next infrequent code is moved outside loops.
bind(L_last_x);
movl(op1, Address(x, 0));
jmp(L_multiply);

bind(L_second_loop_exit);
pop(len);
pop(zlen);
pop(len);
pop(zlen);

// Fifth loop
// Shift z left 1 bit.
lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);

// z[zlen-1] |= x[len-1] & 1;
movl(tmp3, Address(x, len, Address::times_4, -4));
andl(tmp3, 1);
orl(Address(z, zlen, Address::times_4, -4), tmp3);

pop(tmp5);
pop(tmp4);
pop(tmp3);
pop(tmp2);
pop(tmp1);
}

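square_to_len mirrors BigInteger.squareToLen: store the halved squares, add the off-diagonal products, double everything, then patch the low bit. When checking such code, plain schoolbook squaring makes a convenient oracle; a C++ reference model under simplifying assumptions (little-endian 64-bit words and z zero-initialized with 2*n words, unlike the stub's big-endian int[] layout):

    #include <cstdint>

    // Ground-truth schoolbook square: z = x * x, accumulating row by row.
    static void square_model(const uint64_t* x, int n, uint64_t* z /* 2n words */) {
      for (int i = 0; i < n; i++) {
        uint64_t carry = 0;
        for (int j = 0; j < n; j++) {
          unsigned __int128 t = (unsigned __int128)x[i] * x[j] + z[i + j] + carry;
          z[i + j] = (uint64_t)t;
          carry    = (uint64_t)(t >> 64);
        }
        z[i + n] = carry;  // top word of this row is still untouched
      }
    }
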

/**
* Helper function for mul_add()
* Multiply the in[] by int k and add to out[] starting at offset offs using
* 128 bit by 32 bit multiply and return the carry in tmp5.
* Only quad int aligned length of in[] is operated on in this function.
* k is in rdxReg for BMI2Instructions, for others it is in tmp2.
* This function preserves out, in and k registers.
* len and offset point to the appropriate index in "in" & "out" correspondingly
* tmp5 has the carry.
* other registers are temporary and are modified.
*
*/
void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

Label L_first_loop, L_first_loop_exit;

movl(tmp1, len);
shrl(tmp1, 2);

bind(L_first_loop);
subl(tmp1, 1);
jccb(Assembler::negative, L_first_loop_exit);

subl(len, 4);
subl(offset, 4);

Register op2 = tmp2;
const Register sum = tmp3;
const Register op1 = tmp4;
const Register carry = tmp5;

if (UseBMI2Instructions) {
op2 = rdxReg;
}

movq(op1, Address(in, len, Address::times_4, 8));
rorq(op1, 32);
movq(sum, Address(out, offset, Address::times_4, 8));
rorq(sum, 32);
if (UseBMI2Instructions) {
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
}
else {
multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
}
// Store back in big endian from little endian
rorq(sum, 0x20);
movq(Address(out, offset, Address::times_4, 8), sum);

movq(op1, Address(in, len, Address::times_4, 0));
rorq(op1, 32);
movq(sum, Address(out, offset, Address::times_4, 0));
rorq(sum, 32);
if (UseBMI2Instructions) {
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
}
else {
multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
}
// Store back in big endian from little endian
rorq(sum, 0x20);
movq(Address(out, offset, Address::times_4, 0), sum);

jmp(L_first_loop);
bind(L_first_loop_exit);
}

/**
* Code for BigInteger::mulAdd() intrinsic
*
* rdi: out
* rsi: in
* r11: offs (out.length - offset)
* rcx: len
* r8: k
* r12: tmp1
* r13: tmp2
* r14: tmp3
* r15: tmp4
* rbx: tmp5
* Multiply the in[] by word k and add to out[], return the carry in rax
*/
void MacroAssembler::mul_add(Register out, Register in, Register offs,
Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

Label L_carry, L_last_in, L_done;

// carry = 0;
// for (int j=len-1; j >= 0; j--) {
// long product = (in[j] & LONG_MASK) * kLong +
// (out[offs] & LONG_MASK) + carry;
// out[offs--] = (int)product;
// carry = product >>> 32;
// }
//
push(tmp1);
push(tmp2);
push(tmp3);
push(tmp4);
push(tmp5);

Register op2 = tmp2;
const Register sum = tmp3;
const Register op1 = tmp4;
const Register carry = tmp5;

if (UseBMI2Instructions) {
op2 = rdxReg;
movl(op2, k);
}
else {
movl(op2, k);
}

xorq(carry, carry);

//First loop

//Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply
//The carry is in tmp5
mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);

//Multiply the trailing in[] entry using 64 bit by 32 bit, if any
decrementl(len);
jccb(Assembler::negative, L_carry);
decrementl(len);
jccb(Assembler::negative, L_last_in);

movq(op1, Address(in, len, Address::times_4, 0));
rorq(op1, 32);

subl(offs, 2);
movq(sum, Address(out, offs, Address::times_4, 0));
rorq(sum, 32);

if (UseBMI2Instructions) {
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
}
else {
multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
}

// Store back in big endian from little endian
rorq(sum, 0x20);
movq(Address(out, offs, Address::times_4, 0), sum);

testl(len, len);
jccb(Assembler::zero, L_carry);

//Multiply the last in[] entry, if any
bind(L_last_in);
movl(op1, Address(in, 0));
movl(sum, Address(out, offs, Address::times_4, -4));

movl(raxReg, k);
mull(op1); //tmp4 * eax -> edx:eax
addl(sum, carry);
adcl(rdxReg, 0);
addl(sum, raxReg);
adcl(rdxReg, 0);
movl(carry, rdxReg);

movl(Address(out, offs, Address::times_4, -4), sum);

bind(L_carry);
//return tmp5/carry as carry in rax
movl(rax, carry);

bind(L_done);
pop(tmp5);
pop(tmp4);
pop(tmp3);
pop(tmp2);
pop(tmp1);
}

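The Java-level contract from the comment at the top of mul_add, in runnable C++ form (32-bit words, most significant first, as in a Java int[]; a sketch of the semantics rather than of the unrolled stub):

    #include <cstdint>

    // out[offs--] += in[j] * k, rippling a 32-bit carry; returns the final carry.
    static uint32_t mul_add_model(uint32_t* out, const uint32_t* in, int offs,
                                  int len, uint32_t k) {
      uint64_t carry = 0;
      for (int j = len - 1; j >= 0; j--) {
        uint64_t product = (uint64_t)in[j] * k + out[offs] + carry;
        out[offs--] = (uint32_t)product;
        carry = product >> 32;
      }
      return (uint32_t)carry;  // the stub returns this in rax
    }
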
#endif

/**

@@ -1241,6 +1241,25 @@ public:
Register carry2);
void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);

void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
Register tmp2);
void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
Register rdxReg, Register raxReg);
void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
Register tmp3, Register tmp4);
void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
Register raxReg);
void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
Register raxReg);
#endif

// CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.

@@ -3785,6 +3785,107 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

/**
* Arguments:
*
// Input:
// c_rarg0 - x address
// c_rarg1 - x length
// c_rarg2 - z address
// c_rarg3 - z length
*
*/
address generate_squareToLen() {

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "squareToLen");

address start = __ pc();
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
const Register x = rdi;
const Register len = rsi;
const Register z = r8;
const Register zlen = rcx;

const Register tmp1 = r12;
const Register tmp2 = r13;
const Register tmp3 = r14;
const Register tmp4 = r15;
const Register tmp5 = rbx;

BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame

setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
// zlen => rcx
// r9 and r10 may be used to save non-volatile registers
__ movptr(r8, rdx);
__ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

restore_arg_regs();

__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);

return start;
}

/**
* Arguments:
*
* Input:
* c_rarg0 - out address
* c_rarg1 - in address
* c_rarg2 - offset
* c_rarg3 - len
* not Win64
* c_rarg4 - k
* Win64
* rsp+40 - k
*/
address generate_mulAdd() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "mulAdd");

address start = __ pc();
// Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
// Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
const Register out = rdi;
const Register in = rsi;
const Register offset = r11;
const Register len = rcx;
const Register k = r8;

// Next registers will be saved on stack in mul_add().
const Register tmp1 = r12;
const Register tmp2 = r13;
const Register tmp3 = r14;
const Register tmp4 = r15;
const Register tmp5 = rbx;

BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame

setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
// len => rcx, k => r8
// r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
// last argument is on stack on Win64
__ movl(k, Address(rsp, 6 * wordSize));
#endif
__ movptr(r11, rdx); // move offset in rdx to offset(r11)
__ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

restore_arg_regs();

__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);

return start;
}


#undef __
#define __ masm->

@@ -4030,6 +4131,12 @@ class StubGenerator: public StubCodeGenerator {
if (UseMultiplyToLenIntrinsic) {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}
if (UseSquareToLenIntrinsic) {
StubRoutines::_squareToLen = generate_squareToLen();
}
if (UseMulAddIntrinsic) {
StubRoutines::_mulAdd = generate_mulAdd();
}
#endif
}

@@ -33,7 +33,7 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _

enum platform_dependent_constants {
code_size1 = 19000, // simply increase if too small (assembler will crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
code_size2 = 23000 // simply increase if too small (assembler will crash if too small)
};

class x86 {

@@ -790,6 +790,12 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
UseMultiplyToLenIntrinsic = true;
}
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
UseSquareToLenIntrinsic = true;
}
if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
UseMulAddIntrinsic = true;
}
#else
if (UseMultiplyToLenIntrinsic) {
if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {

@@ -797,6 +803,18 @@ void VM_Version::get_processor_features() {
}
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
}
if (UseSquareToLenIntrinsic) {
if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
warning("squareToLen intrinsic is not available in 32-bit VM");
}
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
}
if (UseMulAddIntrinsic) {
if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
warning("mulAdd intrinsic is not available in 32-bit VM");
}
FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
}
#endif
#endif // COMPILER2

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*

@@ -38,8 +38,8 @@ class AIXDecoder: public AbstractDecoder {

virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // demangled by getFuncName

virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0) == 0);
virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0, demangle) == 0);
}
virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
ShouldNotReachHere();

@@ -1439,7 +1439,8 @@ static address resolve_function_descriptor_to_code_pointer(address p) {
}

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
if (offset) {
*offset = -1;
}

@@ -1454,7 +1455,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
}

// Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
return Decoder::decode(addr, buf, buflen, offset);
return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc, // [in] program counter

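Each platform below gets the same shape: attempt the pretty name only when the new demangle flag is set, and fall back to the raw symbol otherwise. A standalone C++ sketch of that pattern using the Itanium ABI demangler (abi::__cxa_demangle stands in here for the Decoder/getFuncName machinery in this diff):

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    // Copy a function name into buf, demangling it only when requested.
    static void function_name(const char* mangled, char* buf, int buflen,
                              bool demangle) {
      if (demangle) {
        int status = 0;
        char* pretty = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
        if (status == 0 && pretty != nullptr) {
          snprintf(buf, (size_t)buflen, "%s", pretty);
          free(pretty);
          return;
        }
      }
      snprintf(buf, (size_t)buflen, "%s", mangled);  // fall back to the raw name
    }
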
@@ -114,7 +114,8 @@ extern "C" int getFuncName(
int* p_displacement, // [out] optional: displacement (-1 if not available)
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further
// information (NULL if not available)
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
bool demangle // [in] whether to demangle the name
) {
struct tbtable* tb = 0;
unsigned int searchcount = 0;

@@ -216,15 +217,17 @@ extern "C" int getFuncName(
p_name[0] = '\0';

// If it is a C++ name, try and demangle it using the Demangle interface (see demangle.h).
char* rest;
Name* const name = Demangle(buf, rest);
if (name) {
const char* const demangled_name = name->Text();
if (demangled_name) {
strncpy(p_name, demangled_name, namelen-1);
p_name[namelen-1] = '\0';
if (demangle) {
char* rest;
Name* const name = Demangle(buf, rest);
if (name) {
const char* const demangled_name = name->Text();
if (demangled_name) {
strncpy(p_name, demangled_name, namelen-1);
p_name[namelen-1] = '\0';
}
delete name;
}
delete name;
}

// Fallback: if demangling did not work, just provide the unmangled name.

@@ -325,7 +328,7 @@ int dladdr(void* addr, Dl_info* info) {
int displacement = 0;

if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
NULL, NULL, 0) == 0) {
NULL, NULL, 0, true /* demangle */) == 0) {
if (funcname[0] != '\0') {
const char* const interned = dladdr_fixed_strings.intern(funcname);
info->dli_sname = interned;

@@ -87,7 +87,8 @@ int getFuncName(
char* p_name, size_t namelen, // [out] optional: user provided buffer for the function name
int* p_displacement, // [out] optional: displacement
const struct tbtable** p_tb, // [out] optional: ptr to traceback table to get further information
char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
char* p_errmsg, size_t errmsglen,// [out] optional: user provided buffer for error messages
bool demangle = true // [in] whether to demangle the name
);

// -------------------------------------------------------------------------

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -42,7 +42,7 @@ class MachODecoder : public AbstractDecoder {
virtual bool decode(address pc, char* buf, int buflen, int* offset,
const void* base);
virtual bool decode(address pc, char* buf, int buflen, int* offset,
const char* module_path = NULL) {
const char* module_path, bool demangle) {
ShouldNotReachHere();
return false;
}

@@ -1339,7 +1339,8 @@ bool os::address_is_in_vm(address addr) {
#define MACH_MAXSYMLEN 256

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@@ -1349,7 +1350,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;

@@ -1358,15 +1359,16 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}

// Handle non-dynamic manually:
if (dlinfo.dli_fbase != NULL &&
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
if (!Decoder::demangle(localbuf, buf, buflen)) {
Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
dlinfo.dli_fbase)) {
if (!(demangle && Decoder::demangle(localbuf, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", localbuf);
}
return true;

@@ -1623,7 +1623,8 @@ bool os::address_is_in_vm(address addr) {
}

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@@ -1632,7 +1633,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void*)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;

@@ -1641,7 +1642,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}

@@ -1627,7 +1627,8 @@ typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int * offset) {
int buflen, int * offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

@@ -1655,7 +1656,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dlinfo.dli_saddr != NULL &&
(char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
if (dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;

@@ -1665,7 +1666,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}

@@ -1679,7 +1680,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (dladdr((void *)addr, &dlinfo) != 0) {
// see if we have a matching symbol
if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, dlinfo.dli_sname);
}
if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;

@@ -1688,7 +1689,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
// no matching symbol so try for just file info
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname)) {
buf, buflen, offset, dlinfo.dli_fname, demangle)) {
return true;
}
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -162,7 +162,7 @@ void WindowsDecoder::initialize() {
// current function and comparing the result
address addr = (address)Decoder::demangle;
char buf[MAX_PATH];
if (decode(addr, buf, sizeof(buf), NULL)) {
if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
_can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
}
}

@@ -187,7 +187,7 @@ bool WindowsDecoder::can_decode_C_frame_in_vm() const {
}

bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath) {
bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name) {
if (_pfnSymGetSymFromAddr64 != NULL) {
PIMAGEHLP_SYMBOL64 pSymbol;
char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];

@@ -197,7 +197,7 @@ bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, co
DWORD64 displacement;
if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
if (buf != NULL) {
if (demangle(pSymbol->Name, buf, buflen)) {
if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", pSymbol->Name);
}
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -60,7 +60,7 @@ public:

bool can_decode_C_frame_in_vm() const;
bool demangle(const char* symbol, char *buf, int buflen);
bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle);
bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
ShouldNotReachHere();
return false;

@@ -1369,11 +1369,12 @@ bool os::dll_address_to_library_name(address addr, char* buf,
}

bool os::dll_address_to_function_name(address addr, char *buf,
int buflen, int *offset) {
int buflen, int *offset,
bool demangle) {
// buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");

if (Decoder::decode(addr, buf, buflen, offset)) {
if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
return true;
}
if (offset != NULL) *offset = -1;

@@ -363,9 +363,6 @@ class CompilerInterfaceVC10 extends CompilerInterface {

// Set /On option
addAttr(rv, "Optimization", opt);
// Set /FR option.
addAttr(rv, "BrowseInformation", "true");
addAttr(rv, "BrowseInformationFile", "$(IntDir)");
// Set /MD option.
addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL");
// Set /Oy- option

@@ -32,11 +32,11 @@
//
// The compact hash table writer implementations
//
CompactHashtableWriter::CompactHashtableWriter(const char* table_name,
CompactHashtableWriter::CompactHashtableWriter(int table_type,
int num_entries,
CompactHashtableStats* stats) {
assert(DumpSharedSpaces, "dump-time only");
_table_name = table_name;
_type = table_type;
_num_entries = num_entries;
_num_buckets = number_of_buckets(_num_entries);
_buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);

@@ -99,7 +99,7 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
NumberSeq* summary) {
int index;
juint* compact_table = p;
// Find the start of the buckets, skip the compact_bucket_infos table
// Compute the start of the buckets, include the compact_bucket_infos table
// and the table end offset.
juint offset = _num_buckets + 1;
*first_bucket = compact_table + offset;

@@ -130,10 +130,17 @@ juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket,
// Write the compact table's entries
juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
NumberSeq* summary) {
uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
uintx base_address = 0;
uintx max_delta = 0;
int num_compact_buckets = 0;
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
base_address = uintx(MetaspaceShared::shared_rs()->base());
max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= 0x7fffffff, "range check");
} else {
assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
assert(UseCompressedOops, "UseCompressedOops is required");
}

assert(p != NULL, "sanity");
for (int index = 0; index < _num_buckets; index++) {

@@ -148,12 +155,16 @@ juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
for (Entry* tent = _buckets[index]; tent;
tent = tent->next()) {
if (bucket_type == REGULAR_BUCKET_TYPE) {
*p++ = juint(tent->hash()); // write symbol hash
*p++ = juint(tent->hash()); // write entry hash
}
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write entry offset
} else {
*p++ = oopDesc::encode_heap_oop(tent->string());
}
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write symbol offset
count ++;
}
assert(count == _bucket_sizes[index], "sanity");

@@ -174,6 +185,10 @@ void CompactHashtableWriter::dump(char** top, char* end) {

uintx base_address = uintx(MetaspaceShared::shared_rs()->base());

// Now write the following at the beginning of the table:
// base_address (uintx)
// num_entries (juint)
// num_buckets (juint)
*p++ = high(base_address);
*p++ = low (base_address); // base address
*p++ = _num_entries; // number of entries in the table

@@ -191,7 +206,8 @@ void CompactHashtableWriter::dump(char** top, char* end) {
if (_num_entries > 0) {
avg_cost = double(_required_bytes)/double(_num_entries);
}
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT, _table_name, (intptr_t)base_address);
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
table_name(), (intptr_t)base_address);
tty->print_cr("Number of entries : %9d", _num_entries);
tty->print_cr("Total bytes used : %9d", (int)((*top) - old_top));
tty->print_cr("Average bytes per entry : %9.3f", avg_cost);

@@ -202,12 +218,24 @@ void CompactHashtableWriter::dump(char** top, char* end) {
}
}

const char* CompactHashtableWriter::table_name() {
switch (_type) {
case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol";
case CompactHashtable<oop, char>::_string_table: return "string";
default:
;
}
return "unknown";
}

/////////////////////////////////////////////////////////////
//
// The CompactHashtable implementation
//
template <class T, class N> const char* CompactHashtable<T, N>::init(const char* buffer) {
template <class T, class N> const char* CompactHashtable<T, N>::init(
CompactHashtableType type, const char* buffer) {
assert(!DumpSharedSpaces, "run-time only");
_type = type;
juint*p = (juint*)buffer;
juint upper = *p++;
juint lower = *p++;

@@ -245,8 +273,34 @@ template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosur
}
}

template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) {
assert(!DumpSharedSpaces, "run-time only");
assert(_type == _string_table || _bucket_count == 0, "sanity");
for (juint i = 0; i < _bucket_count; i ++) {
juint bucket_info = _buckets[i];
juint bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
juint* bucket = _buckets + bucket_offset;
juint* bucket_end = _buckets;

narrowOop o;
if (bucket_type == COMPACT_BUCKET_TYPE) {
o = (narrowOop)bucket[0];
f->do_oop(&o);
} else {
bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
while (bucket < bucket_end) {
o = (narrowOop)bucket[1];
f->do_oop(&o);
bucket += 2;
}
}
}
}

// Explicitly instantiate these types
template class CompactHashtable<Symbol*, char>;
template class CompactHashtable<oop, char>;

#ifndef O_BINARY // if defined (Win32) use binary files.
#define O_BINARY 0 // otherwise do nothing.

@@ -273,6 +327,8 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
_p = _base;
_end = _base + st.st_size;
_filename = filename;
_prefix_type = Unknown;
_line_no = 1;
}

HashtableTextDump::~HashtableTextDump() {

@@ -286,9 +342,9 @@ void HashtableTextDump::quit(const char* err, const char* msg) {
vm_exit_during_initialization(err, msg);
}

void HashtableTextDump::corrupted(const char *p) {
void HashtableTextDump::corrupted(const char *p, const char* msg) {
char info[60];
sprintf(info, "corrupted at pos %d", (int)(p - _base));
sprintf(info, "%s. Corrupted at line %d (file pos %d)", msg, _line_no, (int)(p - _base));
quit(info, _filename);
}

@@ -298,8 +354,9 @@ bool HashtableTextDump::skip_newline() {
} else if (_p[0] == '\n') {
_p += 1;
} else {
corrupted(_p);
corrupted(_p, "Unexpected character");
}
_line_no ++;
return true;
}

@@ -328,26 +385,60 @@ void HashtableTextDump::check_version(const char* ver) {
skip_newline();
}

void HashtableTextDump::scan_prefix_type() {
_p ++;
if (strncmp(_p, "SECTION: String", 15) == 0) {
_p += 15;
_prefix_type = StringPrefix;
} else if (strncmp(_p, "SECTION: Symbol", 15) == 0) {
_p += 15;
_prefix_type = SymbolPrefix;
} else {
_prefix_type = Unknown;
}
skip_newline();
}

int HashtableTextDump::scan_prefix() {
int HashtableTextDump::scan_prefix(int* utf8_length) {
if (*_p == '@') {
scan_prefix_type();
}

switch (_prefix_type) {
case SymbolPrefix:
*utf8_length = scan_symbol_prefix(); break;
case StringPrefix:
*utf8_length = scan_string_prefix(); break;
default:
tty->print_cr("Shared input data type: Unknown.");
corrupted(_p, "Unknown data type");
}

return _prefix_type;
}

int HashtableTextDump::scan_string_prefix() {
// Expect /[0-9]+: /
int utf8_length = get_num(':');
int utf8_length;
get_num(':', &utf8_length);
if (*_p != ' ') {
corrupted(_p);
corrupted(_p, "Wrong prefix format for string");
}
_p++;
return utf8_length;
}

int HashtableTextDump::scan_prefix2() {
int HashtableTextDump::scan_symbol_prefix() {
// Expect /[0-9]+ (-|)[0-9]+: /
int utf8_length = get_num(' ');
if (*_p == '-') {
_p++;
int utf8_length;
get_num(' ', &utf8_length);
if (*_p == '-') {
_p++;
}
(void)get_num(':');
int ref_num;
(void)get_num(':', &ref_num);
if (*_p != ' ') {
corrupted(_p);
corrupted(_p, "Wrong prefix format for symbol");
}
_p++;
return utf8_length;

@@ -408,7 +499,7 @@ void HashtableTextDump::get_utf8(char* utf8_buffer, int utf8_length) {
case 'r': *to++ = '\r'; break;
case '\\': *to++ = '\\'; break;
default:
ShouldNotReachHere();
corrupted(_p, "Unsupported character");
}
}
}

@ -28,6 +28,7 @@
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "services/diagnosticCommand.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
@ -49,7 +50,7 @@ public:
|
||||
// the compact table to the shared archive.
|
||||
//
|
||||
// At dump time, the CompactHashtableWriter obtains all entries from the
|
||||
// symbol table and adds them to a new temporary hash table. The hash
|
||||
// symbol/string table and adds them to a new temporary hash table. The hash
|
||||
// table size (number of buckets) is calculated using
|
||||
// '(num_entries + bucket_size - 1) / bucket_size'. The default bucket
|
||||
// size is 4 and can be changed by -XX:SharedSymbolTableBucketSize option.
|
||||
@ -57,14 +58,14 @@ public:
|
||||
// faster lookup. It also has relatively small number of empty buckets and
|
||||
// good distribution of the entries.
|
||||
//
|
||||
// We use a simple hash function (symbol_hash % num_bucket) for the table.
|
||||
// We use a simple hash function (hash % num_bucket) for the table.
|
||||
// The new table is compacted when written out. Please see comments
|
||||
// above the CompactHashtable class for the table layout detail. The bucket
|
||||
// offsets are written to the archive as part of the compact table. The
|
||||
// bucket offset is encoded in the low 30-bit (0-29) and the bucket type
|
||||
// (regular or compact) are encoded in bit[31, 30]. For buckets with more
|
||||
// than one entry, both symbol hash and symbol offset are written to the
|
||||
// table. For buckets with only one entry, only the symbol offset is written
|
||||
// than one entry, both hash and entry offset are written to the
|
||||
// table. For buckets with only one entry, only the entry offset is written
|
||||
// to the table and the buckets are tagged as compact in their type bits.
|
||||
// Buckets without entry are skipped from the table. Their offsets are
|
||||
// still written out for faster lookup.
|
||||
@ -78,6 +79,7 @@ public:

public:
  Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {}
  Entry(unsigned int hash, oop string) : _next(NULL), _hash(hash), _literal(string) {}

  void *value() {
    return _literal;
@ -85,6 +87,9 @@ public:
  Symbol *symbol() {
    return (Symbol*)_literal;
  }
  oop string() {
    return (oop)_literal;
  }
  unsigned int hash() {
    return _hash;
  }
@ -95,7 +100,7 @@ public:
private:
  static int number_of_buckets(int num_entries);

  const char* _table_name;
  int _type;
  int _num_entries;
  int _num_buckets;
  juint* _bucket_sizes;
@ -105,7 +110,7 @@ private:

public:
  // This is called at dump-time only
  CompactHashtableWriter(const char* table_name, int num_entries, CompactHashtableStats* stats);
  CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats);
  ~CompactHashtableWriter();

  int get_required_bytes() {
@ -116,6 +121,10 @@ public:
    add(hash, new Entry(hash, symbol));
  }

  void add(unsigned int hash, oop string) {
    add(hash, new Entry(hash, string));
  }

private:
  void add(unsigned int hash, Entry* entry);
  juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary);
@ -123,6 +132,7 @@ private:

public:
  void dump(char** top, char* end);
  const char* table_name();
};

#define REGULAR_BUCKET_TYPE 0
@ -136,23 +146,23 @@ public:

/////////////////////////////////////////////////////////////////////////////
//
// CompactHashtable is used to store the CDS archive's symbol table. Used
// CompactHashtable is used to store the CDS archive's symbol/string table. Used
// at runtime only to access the compact table from the archive.
//
// Because these tables are read-only (no entries can be added/deleted) at run-time
// and tend to have a large number of entries, we try to minimize the footprint
// cost per entry.
//
// Layout of compact symbol table in the shared archive:
// Layout of compact table in the shared archive:
//
//   uintx base_address;
//   juint num_symbols;
//   juint num_entries;
//   juint num_buckets;
//   juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
//   juint table[]
//
// -----------------------------------
// | base_address   | num_symbols    |
// | base_address   | num_entries    |
// |---------------------------------|
// | num_buckets    | bucket_info0   |
// |---------------------------------|
@ -177,9 +187,13 @@ public:
// compact buckets have '01' in their highest 2-bit, and regular buckets have
// '00' in their highest 2-bit.
//
// For normal buckets, each symbol's entry is 8 bytes in the table[]:
//   juint hash;    /* symbol hash */
//   juint offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
// For normal buckets, each entry is 8 bytes in the table[]:
//   juint hash;      /* symbol/string hash */
//   union {
//     juint offset;  /* Symbol* sym = (Symbol*)(base_address + offset) */
//     narrowOop str; /* String narrowOop encoding */
//   }
//
//
// For compact buckets, each entry has only the 4-byte 'offset' in the table[].
//
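The bit packing described above can be summarized in code. This is a sketch of the encoding, assuming the mask and shift values implied by the comment, not the actual HotSpot macros:

    #include <stdint.h>

    typedef uint32_t juint;

    const juint BUCKET_OFFSET_MASK = 0x3FFFFFFF;  // bits 0-29: offset
    const juint BUCKET_TYPE_SHIFT  = 30;          // bits 30-31: bucket type

    juint make_bucket_info(juint offset, juint type) {
      return (type << BUCKET_TYPE_SHIFT) | (offset & BUCKET_OFFSET_MASK);
    }

    juint bucket_offset(juint info) { return info & BUCKET_OFFSET_MASK; }
    juint bucket_type(juint info)   { return info >> BUCKET_TYPE_SHIFT; }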
@ -189,19 +203,41 @@ public:
//
template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

public:
  enum CompactHashtableType {
    _symbol_table = 0,
    _string_table = 1
  };

private:
  CompactHashtableType _type;
  uintx  _base_address;
  juint  _entry_count;
  juint  _bucket_count;
  juint  _table_end_offset;
  juint* _buckets;

  inline bool equals(T entry, const char* name, int len) {
    if (entry->equals(name, len)) {
      assert(entry->refcount() == -1, "must be shared");
      return true;
    } else {
      return false;
  inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t,
                              juint* addr, const char* name, int len) {
    Symbol* sym = (Symbol*)((void*)(_base_address + *addr));
    if (sym->equals(name, len)) {
      assert(sym->refcount() == -1, "must be shared");
      return sym;
    }

    return NULL;
  }

  inline oop lookup_entry(CompactHashtable<oop, char>* const t,
                          juint* addr, const char* name, int len) {
    narrowOop obj = (narrowOop)(*addr);
    oop string = oopDesc::decode_heap_oop(obj);
    if (java_lang_String::equals(string, (jchar*)name, len)) {
      return string;
    }

    return NULL;
  }

public:
@ -211,7 +247,14 @@ public:
    _table_end_offset = 0;
    _buckets = 0;
  }
  const char* init(const char *buffer);
  const char* init(CompactHashtableType type, const char *buffer);

  void reset() {
    _entry_count = 0;
    _bucket_count = 0;
    _table_end_offset = 0;
    _buckets = 0;
  }

  // Lookup an entry from the compact table
  inline T lookup(const N* name, unsigned int hash, int len) {
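The pair of lookup_entry() overloads above replaces the old equals() helper: the shared template lookup code calls a single name, and C++ overload resolution binds it to the Symbol* decoder or the narrowOop decoder per instantiation, with no runtime dispatch. A self-contained sketch of that pattern (hypothetical types, not the HotSpot code):

    #include <cstdio>

    template <class T> struct Table;

    // Two overloads; the template below calls lookup_entry(this, slot) and
    // the compiler picks the right decoder for each instantiation.
    long   lookup_entry(Table<long>* t, unsigned slot)   { return (long)slot * 8; }
    double lookup_entry(Table<double>* t, unsigned slot) { return slot * 0.5; }

    template <class T> struct Table {
      T lookup(unsigned slot) { return lookup_entry(this, slot); }
    };

    int main() {
      Table<long> a;  Table<double> b;
      printf("%ld %f\n", a.lookup(4), b.lookup(4));  // prints: 32 2.000000
      return 0;
    }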
@ -225,23 +268,22 @@ public:
    juint* bucket_end = _buckets;

    if (bucket_type == COMPACT_BUCKET_TYPE) {
      // the compact bucket has one entry with symbol offset only
      T entry = (T)((void*)(_base_address + bucket[0]));
      if (equals(entry, name, len)) {
        return entry;
      // the compact bucket has one entry with entry offset only
      T res = lookup_entry(this, &bucket[0], name, len);
      if (res != NULL) {
        return res;
      }
    } else {
      // This is a regular bucket, which has more than one
      // entries. Each entry is a pair of symbol (hash, offset).
      // entries. Each entry is a (hash, offset) pair.
      // Seek until the end of the bucket.
      bucket_end += BUCKET_OFFSET(_buckets[index + 1]);
      while (bucket < bucket_end) {
        unsigned int h = (unsigned int)(bucket[0]);
        if (h == hash) {
          juint offset = bucket[1];
          T entry = (T)((void*)(_base_address + offset));
          if (equals(entry, name, len)) {
            return entry;
          T res = lookup_entry(this, &bucket[1], name, len);
          if (res != NULL) {
            return res;
          }
        }
        bucket += 2;
@ -253,12 +295,15 @@ public:

  // iterate over symbols
  void symbols_do(SymbolClosure *cl);

  // iterate over strings
  void oops_do(OopClosure* f);
};

////////////////////////////////////////////////////////////////////////
//
// Read/Write the contents of a hashtable textual dump (created by
// SymbolTable::dump).
// SymbolTable::dump and StringTable::dump).
// Because the dump file may be big (hundreds of MB in extreme cases),
// we use mmap for fast access when reading it.
//
@ -269,21 +314,29 @@ class HashtableTextDump VALUE_OBJ_CLASS_SPEC {
  const char* _end;
  const char* _filename;
  size_t      _size;
  int         _prefix_type;
  int         _line_no;
public:
  HashtableTextDump(const char* filename);
  ~HashtableTextDump();

  enum {
    SymbolPrefix = 1 << 0,
    StringPrefix = 1 << 1,
    Unknown = 1 << 2
  };

  void quit(const char* err, const char* msg);

  inline int remain() {
    return (int)(_end - _p);
  }

  void corrupted(const char *p);
  void corrupted(const char *p, const char *msg);

  inline void corrupted_if(bool cond) {
    if (cond) {
      corrupted(_p);
      corrupted(_p, NULL);
    }
  }

@ -292,7 +345,7 @@ public:
  void skip_past(char c);
  void check_version(const char* ver);

  inline int get_num(char delim) {
  inline bool get_num(char delim, int *utf8_length) {
    const char* p   = _p;
    const char* end = _end;
    int num = 0;
@ -303,18 +356,22 @@ public:
        num = num * 10 + (c - '0');
      } else if (c == delim) {
        _p = p;
        return num;
        *utf8_length = num;
        return true;
      } else {
        corrupted(p-1);
        // Not [0-9], not 'delim'
        return false;
      }
    }
    corrupted(_end);
    corrupted(_end, "Incorrect format");
    ShouldNotReachHere();
    return 0;
    return false;
  }

  int scan_prefix();
  int scan_prefix2();
  void scan_prefix_type();
  int scan_prefix(int* utf8_length);
  int scan_string_prefix();
  int scan_symbol_prefix();

  jchar unescape(const char* from, const char* end, int count);
  void get_utf8(char* utf8_buffer, int utf8_length);

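To make the prefix grammar above concrete, here is what a symbol line in the textual dump plausibly looks like under the /[0-9]+ (-|)[0-9]+: / pattern (an illustrative example, not taken from a real dump file):

    // Hypothetical dump line for a shared symbol:
    //
    //   17 -1: java/lang/Object
    //
    // scan_symbol_prefix() reads utf8_length = 17 and refcount = -1
    // (shared), then get_utf8() consumes the 17-byte UTF-8 payload.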
@ -118,6 +118,10 @@ class java_lang_String : AllStatic {
    return hash_offset;
  }

  static void set_value_raw(oop string, typeArrayOop buffer) {
    assert(initialized, "Must be initialized");
    string->obj_field_put_raw(value_offset, buffer);
  }
  static void set_value(oop string, typeArrayOop buffer) {
    assert(initialized && (value_offset > 0), "Must be initialized");
    string->obj_field_put(value_offset, (oop)buffer);
@ -210,6 +214,7 @@ class java_lang_String : AllStatic {
  // Debugging
  static void print(oop java_string, outputStream* st);
  friend class JavaClasses;
  friend class StringTable;
};


@ -38,6 +38,7 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#endif
@ -87,19 +88,28 @@ class StableMemoryChecker : public StackObj {

// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;

bool StringTable::_ignore_shared_strings = false;
bool StringTable::_needs_rehashing = false;

volatile int StringTable::_parallel_claimed_idx = 0;

CompactHashtable<oop, char> StringTable::_shared_table;

// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
  return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
                                    java_lang_String::hash_code(s, len);
}

oop StringTable::lookup(int index, jchar* name,
                        int len, unsigned int hash) {
oop StringTable::lookup_shared(jchar* name, int len) {
  // java_lang_String::hash_code() was used to compute hash values in the shared table. Don't
  // use the hash value from StringTable::hash_string() as it might use alternate hashcode.
  return _shared_table.lookup((const char*)name,
                              java_lang_String::hash_code(name, len), len);
}

oop StringTable::lookup_in_main_table(int index, jchar* name,
                                      int len, unsigned int hash) {
  int count = 0;
  for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) {
    count++;
@ -140,7 +150,8 @@ oop StringTable::basic_add(int index_arg, Handle string, jchar* name,
  // Since look-up was done lock-free, we need to check if another
  // thread beat us in the race to insert the symbol.

  oop test = lookup(index, name, len, hashValue); // calls lookup(u1*, int)
  // No need to lookup the shared table from here since the caller (intern()) already did
  oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int)
  if (test != NULL) {
    // Entry already added
    return test;
@ -172,9 +183,14 @@ static void ensure_string_alive(oop string) {
}

oop StringTable::lookup(jchar* name, int len) {
  oop string = lookup_shared(name, len);
  if (string != NULL) {
    return string;
  }

  unsigned int hash = hash_string(name, len);
  int index = the_table()->hash_to_index(hash);
  oop string = the_table()->lookup(index, name, len, hash);
  string = the_table()->lookup_in_main_table(index, name, len, hash);

  ensure_string_alive(string);

@ -184,9 +200,14 @@ oop StringTable::lookup(jchar* name, int len) {

oop StringTable::intern(Handle string_or_null, jchar* name,
                        int len, TRAPS) {
  oop found_string = lookup_shared(name, len);
  if (found_string != NULL) {
    return found_string;
  }

  unsigned int hashValue = hash_string(name, len);
  int index = the_table()->hash_to_index(hashValue);
  oop found_string = the_table()->lookup(index, name, len, hashValue);
  found_string = the_table()->lookup_in_main_table(index, name, len, hashValue);

  // Found
  if (found_string != NULL) {
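Both lookup() and intern() now follow the same two-level order: probe the read-only shared table first, then fall back to the writable main table. A self-contained sketch of that control flow (everything here is a hypothetical stand-in, not HotSpot code):

    #include <cstdio>
    #include <cstring>

    static const char* shared_table[] = { "java/lang/Object", "main" };
    static const char* main_table[]   = { "userString" };

    static const char* probe(const char* const* table, size_t n, const char* name) {
      for (size_t i = 0; i < n; i++) {
        if (strcmp(table[i], name) == 0) return table[i];
      }
      return nullptr;
    }

    // Shared table first (no alternate-seed hashing, no locking needed),
    // then the regular table.
    const char* lookup(const char* name) {
      if (const char* s = probe(shared_table, 2, name)) return s;
      return probe(main_table, 1, name);
    }

    int main() { printf("%s\n", lookup("main")); return 0; }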
@ -611,3 +632,131 @@ int StringtableDCmd::num_arguments() {
    return 0;
  }
}

// Sharing
bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
                                     CompactHashtableWriter* ch_table) {
#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
  assert(UseG1GC, "Only support G1 GC");
  assert(UseCompressedOops && UseCompressedClassPointers,
         "Only support UseCompressedOops and UseCompressedClassPointers enabled");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();
  for (int i = 0; i < the_table()->table_size(); ++i) {
    HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
    for ( ; bucket != NULL; bucket = bucket->next()) {
      oop s = bucket->literal();
      unsigned int hash = java_lang_String::hash_code(s);
      if (hash == 0) {
        continue;
      }

      // allocate the new 'value' array first
      typeArrayOop v = java_lang_String::value(s);
      int v_len = v->size();
      typeArrayOop new_v;
      if (G1CollectedHeap::heap()->is_archive_alloc_too_large(v_len)) {
        continue; // skip the current String. The 'value' array is too large to handle
      } else {
        new_v = (typeArrayOop)G1CollectedHeap::heap()->archive_mem_allocate(v_len);
        if (new_v == NULL) {
          return false; // allocation failed
        }
      }
      // now allocate the new String object
      int s_len = s->size();
      oop new_s = (oop)G1CollectedHeap::heap()->archive_mem_allocate(s_len);
      if (new_s == NULL) {
        return false;
      }

      s->identity_hash();
      v->identity_hash();

      // copy the objects' data
      Copy::aligned_disjoint_words((HeapWord*)s, (HeapWord*)new_s, s_len);
      Copy::aligned_disjoint_words((HeapWord*)v, (HeapWord*)new_v, v_len);

      // adjust the pointer to the 'value' field in the new String oop. Also pre-compute and set the
      // 'hash' field. That avoids "write" to the shared strings at runtime by the deduplication process.
      java_lang_String::set_value_raw(new_s, new_v);
      if (java_lang_String::hash(new_s) == 0) {
        java_lang_String::set_hash(new_s, hash);
      }

      // add to the compact table
      ch_table->add(hash, new_s);
    }
  }

  G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
  assert(string_space->length() <= 2, "sanity");
#endif
  return true;
}

bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space,
                                     size_t* space_size) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
  if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
    if (PrintSharedSpaces) {
      tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
                    "UseCompressedOops and UseCompressedClassPointers are required.");
    }
    return true;
  }

  CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table,
                                  the_table()->number_of_entries(),
                                  &MetaspaceShared::stats()->string);

  // Copy the interned strings into the "string space" within the java heap
  if (!copy_shared_string(string_space, &ch_table)) {
    return false;
  }

  for (int i = 0; i < string_space->length(); i++) {
    *space_size += string_space->at(i).byte_size();
  }

  // Now dump the compact table
  if (*top + ch_table.get_required_bytes() > end) {
    // not enough space left
    return false;
  }
  ch_table.dump(top, end);
  *top = (char*)align_pointer_up(*top, sizeof(void*));

#endif
  return true;
}

void StringTable::shared_oops_do(OopClosure* f) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
  _shared_table.oops_do(f);
#endif
}

const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
  if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
    // no shared string data
    return buffer;
  }

  // initialize the shared table
  juint *p = (juint*)buffer;
  const char* end = _shared_table.init(
          CompactHashtable<oop, char>::_string_table, (char*)p);
  const char* aligned_end = (const char*)align_pointer_up(end, sizeof(void*));

  if (_ignore_shared_strings) {
    _shared_table.reset();
  }

  return aligned_end;
#endif

  return buffer;
}

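copy_compact_table() applies a reserve-check/dump/align pattern that recurs throughout this change. A simplified, self-contained model of it (Writer and align_up are stand-ins, not the HotSpot APIs):

    #include <cstdint>
    #include <cstddef>

    static char* align_up(char* p, size_t align) {
      return (char*)(((uintptr_t)p + align - 1) & ~(uintptr_t)(align - 1));
    }

    struct Writer {
      size_t get_required_bytes() const { return 128; }
      void dump(char** top, char* end) { *top += 128; (void)end; }
    };

    bool dump_table(Writer& w, char** top, char* end) {
      if (*top + w.get_required_bytes() > end) {
        return false;                          // not enough archive space left
      }
      w.dump(top, end);
      *top = align_up(*top, sizeof(void*));    // keep the next table aligned
      return true;
    }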
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,10 @@
#include "memory/allocation.inline.hpp"
#include "utilities/hashtable.hpp"

template <class T, class N> class CompactHashtable;
class CompactHashtableWriter;
class FileMapInfo;

class StringTable : public RehashableHashtable<oop, mtSymbol> {
  friend class VMStructs;
  friend class Symbol;
@ -36,6 +40,10 @@ private:
  // The string table
  static StringTable* _the_table;

  // Shared string table
  static CompactHashtable<oop, char> _shared_table;
  static bool _ignore_shared_strings;

  // Set if one bucket is out of balance due to hash algorithm deficiency
  static bool _needs_rehashing;

@ -46,7 +54,8 @@ private:
  oop basic_add(int index, Handle string_or_null, jchar* name, int len,
                unsigned int hashValue, TRAPS);

  oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
  oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue);
  static oop lookup_shared(jchar* name, int len);

  // Apply the given oop closure to the entries in the buckets
  // in the range [start_idx, end_idx).
@ -141,12 +150,14 @@ public:
  static int verify_and_compare_entries();

  // Sharing
  static void copy_buckets(char** top, char*end) {
    the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
  }
  static void copy_table(char** top, char*end) {
    the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end);
  }
  static void ignore_shared_strings(bool v) { _ignore_shared_strings = v; }
  static bool shared_string_ignored() { return _ignore_shared_strings; }
  static void shared_oops_do(OopClosure* f);
  static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
                                 CompactHashtableWriter* ch_table);
  static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space,
                                 size_t* space_size);
  static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
  static void reverse() {
    the_table()->Hashtable<oop, mtSymbol>::reverse();
  }

@ -539,7 +539,8 @@ void SymbolTable::dump(outputStream* st, bool verbose) {

bool SymbolTable::copy_compact_table(char** top, char*end) {
#if INCLUDE_CDS
  CompactHashtableWriter ch_table("symbol", the_table()->number_of_entries(),
  CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table,
                                  the_table()->number_of_entries(),
                                  &MetaspaceShared::stats()->symbol);
  if (*top + ch_table.get_required_bytes() > end) {
    // not enough space left
@ -556,7 +557,6 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
    }
  }

  char* old_top = *top;
  ch_table.dump(top, end);

  *top = (char*)align_pointer_up(*top, sizeof(void*));
@ -565,7 +565,8 @@ bool SymbolTable::copy_compact_table(char** top, char*end) {
}

const char* SymbolTable::init_shared_table(const char* buffer) {
  const char* end = _shared_table.init(buffer);
  const char* end = _shared_table.init(
          CompactHashtable<Symbol*, char>::_symbol_table, buffer);
  return (const char*)align_pointer_up(end, sizeof(void*));
}

@ -799,6 +799,14 @@
  do_name(     multiplyToLen_name,      "multiplyToLen")                                          \
  do_signature(multiplyToLen_signature, "([II[II[I)[I")                                           \
                                                                                                  \
  do_intrinsic(_squareToLen, java_math_BigInteger, squareToLen_name, squareToLen_signature, F_S)  \
   do_name(     squareToLen_name,       "implSquareToLen")                                        \
   do_signature(squareToLen_signature,  "([II[II)[I")                                             \
                                                                                                  \
  do_intrinsic(_mulAdd, java_math_BigInteger, mulAdd_name, mulAdd_signature, F_S)                 \
   do_name(     mulAdd_name,            "implMulAdd")                                             \
   do_signature(mulAdd_signature,       "([I[IIII)I")                                             \
                                                                                                  \
  /* java/lang/ref/Reference */                                                                   \
  do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R)     \
                                                                                                  \

@ -190,7 +190,12 @@ class CodeCache : AllStatic {
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();         // clear all inline caches

  // Returns the CodeBlobType for nmethods of the given compilation level
  // Returns the CodeBlobType for the given nmethod
  static int get_code_blob_type(nmethod* nm) {
    return get_code_heap(nm)->code_blob_type();
  }

  // Returns the CodeBlobType for the given compilation level
  static int get_code_blob_type(int comp_level) {
    if (comp_level == CompLevel_none ||
        comp_level == CompLevel_simple ||
@ -287,7 +292,7 @@ private:
      // Iterate over all CodeBlobs
      _code_blob_type = CodeBlobType::All;
    } else if (nm != NULL) {
      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
      _code_blob_type = CodeCache::get_code_blob_type(nm);
    } else {
      // Only iterate over method code heaps, starting with non-profiled
      _code_blob_type = CodeBlobType::MethodNonProfiled;

|
@ -1421,7 +1421,7 @@ void nmethod::flush() {
|
||||
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
|
||||
if (PrintMethodFlushing) {
|
||||
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
|
||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
|
||||
}
|
||||
|
||||
// We need to deallocate any ExceptionCache data.
|
||||
|
@ -107,7 +107,8 @@ void CollectionSetChooser::verify() {
|
||||
HeapRegion *curr = regions_at(index++);
|
||||
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
|
||||
guarantee(!curr->is_young(), "should not be young!");
|
||||
guarantee(!curr->is_humongous(), "should not be humongous!");
|
||||
guarantee(!curr->is_pinned(),
|
||||
err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index()));
|
||||
if (prev != NULL) {
|
||||
guarantee(order_regions(prev, curr) != 1,
|
||||
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
|
||||
@ -149,8 +150,8 @@ void CollectionSetChooser::sort_regions() {


void CollectionSetChooser::add_region(HeapRegion* hr) {
  assert(!hr->is_humongous(),
         "Humongous regions shouldn't be added to the collection set");
  assert(!hr->is_pinned(),
         err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()));
  assert(!hr->is_young(), "should not be young!");
  _regions.append(hr);
  _length++;
|
@ -103,13 +103,12 @@ public:
|
||||
void sort_regions();
|
||||
|
||||
// Determine whether to add the given region to the CSet chooser or
|
||||
// not. Currently, we skip humongous regions (we never add them to
|
||||
// the CSet, we only reclaim them during cleanup) and regions whose
|
||||
// live bytes are over the threshold.
|
||||
// not. Currently, we skip pinned regions and regions whose live
|
||||
// bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
|
||||
bool should_add(HeapRegion* hr) {
|
||||
assert(hr->is_marked(), "pre-condition");
|
||||
assert(!hr->is_young(), "should never consider young regions");
|
||||
return !hr->is_humongous() &&
|
||||
return !hr->is_pinned() &&
|
||||
hr->live_bytes() < _region_live_threshold_bytes;
|
||||
}
|
||||
|
||||
|
@ -1784,7 +1784,7 @@ public:
|
||||
const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
|
||||
|
||||
bool doHeapRegion(HeapRegion *hr) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
if (hr->is_continues_humongous() || hr->is_archive()) {
|
||||
return false;
|
||||
}
|
||||
// We use a claim value of zero here because all regions
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "gc/g1/g1Allocator.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc/g1/g1MarkSweep.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionSet.inline.hpp"
|
||||
|
||||
@ -44,6 +45,8 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
|
||||
HeapRegion** retained_old) {
|
||||
HeapRegion* retained_region = *retained_old;
|
||||
*retained_old = NULL;
|
||||
assert(retained_region == NULL || !retained_region->is_archive(),
|
||||
err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index()));
|
||||
|
||||
// We will discard the current GC alloc region if:
|
||||
// a) it's in the collection set (it can happen!),
|
||||
@ -168,3 +171,153 @@ void G1DefaultParGCAllocator::waste(size_t& wasted, size_t& undo_wasted) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) {
|
||||
// Create the archive allocator, and also enable archive object checking
|
||||
// in mark-sweep, since we will be creating archive regions.
|
||||
G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h);
|
||||
G1MarkSweep::enable_archive_object_check();
|
||||
return result;
|
||||
}
|
||||
|
||||
bool G1ArchiveAllocator::alloc_new_region() {
|
||||
// Allocate the highest free region in the reserved heap,
|
||||
// and add it to our list of allocated regions. It is marked
|
||||
// archive and added to the old set.
|
||||
HeapRegion* hr = _g1h->alloc_highest_free_region();
|
||||
if (hr == NULL) {
|
||||
return false;
|
||||
}
|
||||
assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index()));
|
||||
hr->set_archive();
|
||||
_g1h->_old_set.add(hr);
|
||||
_g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive);
|
||||
_allocated_regions.append(hr);
|
||||
_allocation_region = hr;
|
||||
|
||||
// Set up _bottom and _max to begin allocating in the lowest
|
||||
// min_region_size'd chunk of the allocated G1 region.
|
||||
_bottom = hr->bottom();
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
|
||||
// Tell mark-sweep that objects in this region are not to be marked.
|
||||
G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
|
||||
|
||||
// Since we've modified the old set, call update_sizes.
|
||||
_g1h->g1mm()->update_sizes();
|
||||
return true;
|
||||
}
|
||||
|
||||
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
|
||||
assert(word_size != 0, "size must not be zero");
|
||||
if (_allocation_region == NULL) {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
HeapWord* old_top = _allocation_region->top();
|
||||
assert(_bottom >= _allocation_region->bottom(),
|
||||
err_msg("inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(_allocation_region->bottom())));
|
||||
assert(_max <= _allocation_region->end(),
|
||||
err_msg("inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
|
||||
p2i(_max), p2i(_allocation_region->end())));
|
||||
assert(_bottom <= old_top && old_top <= _max,
|
||||
err_msg("inconsistent allocation state: expected "
|
||||
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(old_top), p2i(_max)));
|
||||
|
||||
// Allocate the next word_size words in the current allocation chunk.
|
||||
// If allocation would cross the _max boundary, insert a filler and begin
|
||||
// at the base of the next min_region_size'd chunk. Also advance to the next
|
||||
// chunk if we don't yet cross the boundary, but the remainder would be too
|
||||
// small to fill.
|
||||
HeapWord* new_top = old_top + word_size;
|
||||
size_t remainder = pointer_delta(_max, new_top);
|
||||
if ((new_top > _max) ||
|
||||
((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
|
||||
if (old_top != _max) {
|
||||
size_t fill_size = pointer_delta(_max, old_top);
|
||||
CollectedHeap::fill_with_object(old_top, fill_size);
|
||||
_summary_bytes_used += fill_size * HeapWordSize;
|
||||
}
|
||||
_allocation_region->set_top(_max);
|
||||
old_top = _bottom = _max;
|
||||
|
||||
// Check if we've just used up the last min_region_size'd chunk
|
||||
// in the current region, and if so, allocate a new one.
|
||||
if (_bottom != _allocation_region->end()) {
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
} else {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
old_top = _allocation_region->bottom();
|
||||
}
|
||||
}
|
||||
_allocation_region->set_top(old_top + word_size);
|
||||
_summary_bytes_used += word_size * HeapWordSize;
|
||||
|
||||
return old_top;
|
||||
}
|
||||
|
||||
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
|
||||
err_msg("alignment " SIZE_FORMAT " too large", end_alignment_in_bytes));
|
||||
assert(is_size_aligned(end_alignment_in_bytes, HeapWordSize),
|
||||
err_msg("alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize));
|
||||
|
||||
// If we've allocated nothing, simply return.
|
||||
if (_allocation_region == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If an end alignment was requested, insert filler objects.
|
||||
if (end_alignment_in_bytes != 0) {
|
||||
HeapWord* currtop = _allocation_region->top();
|
||||
HeapWord* newtop = (HeapWord*)align_pointer_up(currtop, end_alignment_in_bytes);
|
||||
size_t fill_size = pointer_delta(newtop, currtop);
|
||||
if (fill_size != 0) {
|
||||
if (fill_size < CollectedHeap::min_fill_size()) {
|
||||
// If the required fill is smaller than we can represent,
|
||||
// bump up to the next aligned address. We know we won't exceed the current
|
||||
// region boundary because the max supported alignment is smaller than the min
|
||||
// region size, and because the allocation code never leaves space smaller than
|
||||
// the min_fill_size at the top of the current allocation region.
|
||||
newtop = (HeapWord*)align_pointer_up(currtop + CollectedHeap::min_fill_size(),
|
||||
end_alignment_in_bytes);
|
||||
fill_size = pointer_delta(newtop, currtop);
|
||||
}
|
||||
HeapWord* fill = archive_mem_allocate(fill_size);
|
||||
CollectedHeap::fill_with_objects(fill, fill_size);
|
||||
}
|
||||
}
|
||||
|
||||
// Loop through the allocated regions, and create MemRegions summarizing
|
||||
// the allocated address range, combining contiguous ranges. Add the
|
||||
// MemRegions to the GrowableArray provided by the caller.
|
||||
int index = _allocated_regions.length() - 1;
|
||||
assert(_allocated_regions.at(index) == _allocation_region,
|
||||
err_msg("expected region %u at end of array, found %u",
|
||||
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index()));
|
||||
HeapWord* base_address = _allocation_region->bottom();
|
||||
HeapWord* top = base_address;
|
||||
|
||||
while (index >= 0) {
|
||||
HeapRegion* next = _allocated_regions.at(index);
|
||||
HeapWord* new_base = next->bottom();
|
||||
HeapWord* new_top = next->top();
|
||||
if (new_base != top) {
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
base_address = new_base;
|
||||
}
|
||||
top = new_top;
|
||||
index = index - 1;
|
||||
}
|
||||
|
||||
assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address)));
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
_allocated_regions.clear();
|
||||
_allocation_region = NULL;
|
||||
};
|
||||
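The chunking scheme in archive_mem_allocate() above can be modeled in a few lines: bump-allocate within a window, and when a request would cross the window end, or would leave a tail too small to describe as a filler object, pad out the window and open the next fixed-size chunk. A simplified model under those assumptions (not the HotSpot code, which also handles region exhaustion):

    #include <cstddef>
    #include <cassert>

    struct ChunkAllocator {
      size_t top, max, chunk_words, min_fill_words;

      size_t allocate(size_t words) {
        assert(words != 0 && words < chunk_words);
        size_t free_words = max - top;
        bool crosses   = words > free_words;
        bool tiny_tail = !crosses && free_words != words &&
                         (free_words - words) < min_fill_words;
        if (crosses || tiny_tail) {
          // The real code formats [top, max) as a filler object here.
          top = max;
          max += chunk_words;        // next min_region_size'd chunk
        }
        size_t result = top;
        top += words;
        return result;
      }
    };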
|
@ -269,4 +269,72 @@ public:
|
||||
virtual void waste(size_t& wasted, size_t& undo_wasted);
|
||||
};
|
||||
|
||||
// G1ArchiveAllocator is used to allocate memory in archive
|
||||
// regions. Such regions are not modifiable by GC, being neither
|
||||
// scavenged nor compacted, or even marked in the object header.
|
||||
// They can contain no pointers to non-archive heap regions,
|
||||
class G1ArchiveAllocator : public CHeapObj<mtGC> {
|
||||
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The current allocation region
|
||||
HeapRegion* _allocation_region;
|
||||
|
||||
// Regions allocated for the current archive range.
|
||||
GrowableArray<HeapRegion*> _allocated_regions;
|
||||
|
||||
// The number of bytes used in the current range.
|
||||
size_t _summary_bytes_used;
|
||||
|
||||
// Current allocation window within the current region.
|
||||
HeapWord* _bottom;
|
||||
HeapWord* _top;
|
||||
HeapWord* _max;
|
||||
|
||||
// Allocate a new region for this archive allocator.
|
||||
// Allocation is from the top of the reserved heap downward.
|
||||
bool alloc_new_region();
|
||||
|
||||
public:
|
||||
G1ArchiveAllocator(G1CollectedHeap* g1h) :
|
||||
_g1h(g1h),
|
||||
_allocation_region(NULL),
|
||||
_allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
|
||||
ResourceObj::C_HEAP),
|
||||
2), true /* C_Heap */),
|
||||
_summary_bytes_used(0),
|
||||
_bottom(NULL),
|
||||
_top(NULL),
|
||||
_max(NULL) { }
|
||||
|
||||
virtual ~G1ArchiveAllocator() {
|
||||
assert(_allocation_region == NULL, "_allocation_region not NULL");
|
||||
}
|
||||
|
||||
static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);
|
||||
|
||||
// Allocate memory for an individual object.
|
||||
HeapWord* archive_mem_allocate(size_t word_size);
|
||||
|
||||
// Return the memory ranges used in the current archive, after
|
||||
// aligning to the requested alignment.
|
||||
void complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes);
|
||||
|
||||
// The number of bytes allocated by this allocator.
|
||||
size_t used() {
|
||||
return _summary_bytes_used;
|
||||
}
|
||||
|
||||
// Clear the count of bytes allocated in prior G1 regions. This
|
||||
// must be done when recalculate_use is used to reset the counter
|
||||
// for the generic allocator, since it counts bytes in all G1
|
||||
// regions, including those still associated with this allocator.
|
||||
void clear_used() {
|
||||
_summary_bytes_used = 0;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP
|
||||
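The typical dump-time lifecycle of this class, as exercised by StringTable::copy_shared_string() earlier in this change, is sketched below (comments only; the call names come from the diff itself):

    // g1h->begin_archive_alloc_range();        // creates a G1ArchiveAllocator
    // HeapWord* p = g1h->archive_mem_allocate(word_size);
    // ...copy objects into p...
    // g1h->end_archive_alloc_range(&ranges, os::vm_allocation_granularity());
    //                                          // fills 'ranges', deletes allocator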
|
@ -26,6 +26,7 @@
|
||||
#define SHARE_VM_GC_G1_G1BIASEDARRAY_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
// Implements the common base functionality for arrays that contain provisions
|
||||
@ -128,6 +129,14 @@ public:
|
||||
return biased_base()[biased_index];
|
||||
}
|
||||
|
||||
// Return the index of the element of the given array that covers the given
|
||||
// word in the heap.
|
||||
idx_t get_index_by_address(HeapWord* value) const {
|
||||
idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
|
||||
this->verify_biased_index(biased_index);
|
||||
return biased_index - _bias;
|
||||
}
|
||||
|
||||
// Set the value of the array entry that corresponds to the given array.
|
||||
void set_by_address(HeapWord * address, T value) {
|
||||
idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
|
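The biased-index math used by get_index_by_address() in isolation, with illustrative values (one array slot per 2^shift bytes of heap; all constants here are made up for the example):

    #include <cstdint>
    #include <cassert>

    int main() {
      const uintptr_t heap_base = 0x80000000u;
      const unsigned  shift     = 20;                 // one slot per 1 MB block
      const uintptr_t bias      = heap_base >> shift; // slot of the first block

      uintptr_t addr  = heap_base + 5 * (1u << shift) + 1234;
      uintptr_t index = (addr >> shift) - bias;       // addr's covering slot
      assert(index == 5);
      return 0;
    }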
||||
@ -135,6 +144,18 @@ public:
|
||||
biased_base()[biased_index] = value;
|
||||
}
|
||||
|
||||
// Set the value of all array entries that correspond to addresses
|
||||
// in the specified MemRegion.
|
||||
void set_by_address(MemRegion range, T value) {
|
||||
idx_t biased_start = ((uintptr_t)range.start()) >> this->shift_by();
|
||||
idx_t biased_last = ((uintptr_t)range.last()) >> this->shift_by();
|
||||
this->verify_biased_index(biased_start);
|
||||
this->verify_biased_index(biased_last);
|
||||
for (idx_t i = biased_start; i <= biased_last; i++) {
|
||||
biased_base()[i] = value;
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
// Returns the address of the element the given address maps to
|
||||
T* address_mapped_to(HeapWord* address) {
|
||||
|
@ -405,7 +405,7 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
|
||||
// can move in an incremental collection.
|
||||
bool G1CollectedHeap::is_scavengable(const void* p) {
|
||||
HeapRegion* hr = heap_region_containing(p);
|
||||
return !hr->is_humongous();
|
||||
return !hr->is_pinned();
|
||||
}
|
||||
|
||||
// Private methods.
|
||||
@ -908,6 +908,207 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::begin_archive_alloc_range() {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
if (_archive_allocator == NULL) {
|
||||
_archive_allocator = G1ArchiveAllocator::create_allocator(this);
|
||||
}
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
|
||||
// Allocations in archive regions cannot be of a size that would be considered
|
||||
// humongous even for a minimum-sized region, because G1 region sizes/boundaries
|
||||
// may be different at archive-restore time.
|
||||
return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
|
||||
if (is_archive_alloc_too_large(word_size)) {
|
||||
return NULL;
|
||||
}
|
||||
return _archive_allocator->archive_mem_allocate(word_size);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
|
||||
|
||||
// Call complete_archive to do the real work, filling in the MemRegion
|
||||
// array with the archive regions.
|
||||
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
|
||||
delete _archive_allocator;
|
||||
_archive_allocator = NULL;
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MutexLockerEx x(Heap_lock);
|
||||
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
HeapWord* prev_last_addr = NULL;
|
||||
HeapRegion* prev_last_region = NULL;
|
||||
|
||||
// Temporarily disable pretouching of heap pages. This interface is used
|
||||
// when mmap'ing archived heap data in, so pre-touching is wasted.
|
||||
FlagSetting fs(AlwaysPreTouch, false);
|
||||
|
||||
// Enable archive object checking in G1MarkSweep. We have to let it know
|
||||
// about each archive range, so that objects in those ranges aren't marked.
|
||||
G1MarkSweep::enable_archive_object_check();
|
||||
|
||||
// For each specified MemRegion range, allocate the corresponding G1
|
||||
// regions and mark them as archive regions. We expect the ranges in
|
||||
// ascending starting address order, without overlap.
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
MemRegion curr_range = ranges[i];
|
||||
HeapWord* start_address = curr_range.start();
|
||||
size_t word_size = curr_range.word_size();
|
||||
HeapWord* last_address = curr_range.last();
|
||||
size_t commits = 0;
|
||||
|
||||
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
|
||||
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
|
||||
p2i(start_address), p2i(last_address)));
|
||||
guarantee(start_address > prev_last_addr,
|
||||
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
|
||||
p2i(start_address), p2i(prev_last_addr)));
|
||||
prev_last_addr = last_address;
|
||||
|
||||
// Check for ranges that start in the same G1 region in which the previous
|
||||
// range ended, and adjust the start address so we don't try to allocate
|
||||
// the same region again. If the current range is entirely within that
|
||||
// region, skip it, just adjusting the recorded top.
|
||||
HeapRegion* start_region = _hrm.addr_to_region(start_address);
|
||||
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
|
||||
start_address = start_region->end();
|
||||
if (start_address > last_address) {
|
||||
_allocator->increase_used(word_size * HeapWordSize);
|
||||
start_region->set_top(last_address + 1);
|
||||
continue;
|
||||
}
|
||||
start_region->set_top(start_address);
|
||||
curr_range = MemRegion(start_address, last_address + 1);
|
||||
start_region = _hrm.addr_to_region(start_address);
|
||||
}
|
||||
|
||||
// Perform the actual region allocation, exiting if it fails.
|
||||
// Then note how much new space we have allocated.
|
||||
if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
|
||||
return false;
|
||||
}
|
||||
_allocator->increase_used(word_size * HeapWordSize);
|
||||
if (commits != 0) {
|
||||
ergo_verbose1(ErgoHeapSizing,
|
||||
"attempt heap expansion",
|
||||
ergo_format_reason("allocate archive regions")
|
||||
ergo_format_byte("total size"),
|
||||
HeapRegion::GrainWords * HeapWordSize * commits);
|
||||
}
|
||||
|
||||
// Mark each G1 region touched by the range as archive, add it to the old set,
|
||||
// and set the allocation context and top.
|
||||
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm.addr_to_region(last_address);
|
||||
prev_last_region = last_region;
|
||||
|
||||
while (curr_region != NULL) {
|
||||
assert(curr_region->is_empty() && !curr_region->is_pinned(),
|
||||
err_msg("Region already in use (index %u)", curr_region->hrm_index()));
|
||||
_hr_printer.alloc(curr_region, G1HRPrinter::Archive);
|
||||
curr_region->set_allocation_context(AllocationContext::system());
|
||||
curr_region->set_archive();
|
||||
_old_set.add(curr_region);
|
||||
if (curr_region != last_region) {
|
||||
curr_region->set_top(curr_region->end());
|
||||
curr_region = _hrm.next_region_in_heap(curr_region);
|
||||
} else {
|
||||
curr_region->set_top(last_address + 1);
|
||||
curr_region = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Notify mark-sweep of the archive range.
|
||||
G1MarkSweep::mark_range_archive(curr_range);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
MemRegion reserved = _hrm.reserved();
|
||||
HeapWord *prev_last_addr = NULL;
|
||||
HeapRegion* prev_last_region = NULL;
|
||||
|
||||
// For each MemRegion, create filler objects, if needed, in the G1 regions
|
||||
// that contain the address range. The address range actually within the
|
||||
// MemRegion will not be modified. That is assumed to have been initialized
|
||||
// elsewhere, probably via an mmap of archived heap data.
|
||||
MutexLockerEx x(Heap_lock);
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
HeapWord* start_address = ranges[i].start();
|
||||
HeapWord* last_address = ranges[i].last();
|
||||
|
||||
assert(reserved.contains(start_address) && reserved.contains(last_address),
|
||||
err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
|
||||
p2i(start_address), p2i(last_address)));
|
||||
assert(start_address > prev_last_addr,
|
||||
err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
|
||||
p2i(start_address), p2i(prev_last_addr)));
|
||||
|
||||
HeapRegion* start_region = _hrm.addr_to_region(start_address);
|
||||
HeapRegion* last_region = _hrm.addr_to_region(last_address);
|
||||
HeapWord* bottom_address = start_region->bottom();
|
||||
|
||||
// Check for a range beginning in the same region in which the
|
||||
// previous one ended.
|
||||
if (start_region == prev_last_region) {
|
||||
bottom_address = prev_last_addr + 1;
|
||||
}
|
||||
|
||||
// Verify that the regions were all marked as archive regions by
|
||||
// alloc_archive_regions.
|
||||
HeapRegion* curr_region = start_region;
|
||||
while (curr_region != NULL) {
|
||||
guarantee(curr_region->is_archive(),
|
||||
err_msg("Expected archive region at index %u", curr_region->hrm_index()));
|
||||
if (curr_region != last_region) {
|
||||
curr_region = _hrm.next_region_in_heap(curr_region);
|
||||
} else {
|
||||
curr_region = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
prev_last_addr = last_address;
|
||||
prev_last_region = last_region;
|
||||
|
||||
// Fill the memory below the allocated range with dummy object(s),
|
||||
// if the region bottom does not match the range start, or if the previous
|
||||
// range ended within the same G1 region, and there is a gap.
|
||||
if (start_address != bottom_address) {
|
||||
size_t fill_size = pointer_delta(start_address, bottom_address);
|
||||
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
|
||||
_allocator->increase_used(fill_size * HeapWordSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
uint* gc_count_before_ret,
|
||||
uint* gclocker_retry_count_ret) {
|
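Taken together, the three functions above form the restore-time sequence for mapped archive heap data. A comment-only sketch of the expected flow (the mmap step itself lives in the filemap code, outside this hunk):

    // if (!g1h->check_archive_addresses(ranges, n)) fail();  // fits in reserved heap?
    // if (!g1h->alloc_archive_regions(ranges, n))   fail();  // commit + mark archive
    // ...mmap archived heap data into the ranges...
    // g1h->fill_archive_regions(ranges, n);                  // plug gaps with fillers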
||||
@ -1132,6 +1333,8 @@ public:
|
||||
}
|
||||
} else if (hr->is_continues_humongous()) {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
|
||||
} else if (hr->is_archive()) {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::Archive);
|
||||
} else if (hr->is_old()) {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
|
||||
} else {
|
||||
@ -1723,6 +1926,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
|
||||
_humongous_reclaim_candidates(),
|
||||
_has_humongous_reclaim_candidates(false),
|
||||
_archive_allocator(NULL),
|
||||
_free_regions_coming(false),
|
||||
_young_list(new YoungList(this)),
|
||||
_gc_time_stamp(0),
|
||||
@ -1748,7 +1952,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_workers->initialize_workers();
|
||||
|
||||
_allocator = G1Allocator::create_allocator(this);
|
||||
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
|
||||
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
|
||||
|
||||
// Override the default _filler_array_max_size so that no humongous filler
|
||||
// objects are created.
|
||||
_filler_array_max_size = _humongous_object_threshold_in_words;
|
||||
|
||||
uint n_queues = ParallelGCThreads;
|
||||
_task_queues = new RefToScanQueueSet(n_queues);
|
||||
@ -2163,7 +2371,11 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
|
||||
|
||||
// Computes the sum of the storage used by the various regions.
|
||||
size_t G1CollectedHeap::used() const {
|
||||
return _allocator->used();
|
||||
size_t result = _allocator->used();
|
||||
if (_archive_allocator != NULL) {
|
||||
result += _archive_allocator->used();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::used_unlocked() const {
|
||||
@ -2576,7 +2788,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
|
||||
|
||||
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
|
||||
HeapRegion* result = _hrm.next_region_in_heap(from);
|
||||
while (result != NULL && result->is_humongous()) {
|
||||
while (result != NULL && result->is_pinned()) {
|
||||
result = _hrm.next_region_in_heap(result);
|
||||
}
|
||||
return result;
|
||||
@ -2884,6 +3096,31 @@ public:
|
||||
size_t live_bytes() { return _live_bytes; }
|
||||
};
|
||||
|
||||
class VerifyArchiveOopClosure: public OopClosure {
|
||||
public:
|
||||
VerifyArchiveOopClosure(HeapRegion *hr) { }
|
||||
void do_oop(narrowOop *p) { do_oop_work(p); }
|
||||
void do_oop( oop *p) { do_oop_work(p); }
|
||||
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
|
||||
err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
|
||||
p2i(p), p2i(obj)));
|
||||
}
|
||||
};
|
||||
|
||||
class VerifyArchiveRegionClosure: public ObjectClosure {
|
||||
public:
|
||||
VerifyArchiveRegionClosure(HeapRegion *hr) { }
|
||||
// Verify that all object pointers are to archive regions.
|
||||
void do_object(oop o) {
|
||||
VerifyArchiveOopClosure checkOop(NULL);
|
||||
assert(o != NULL, "Should not be here for NULL oops");
|
||||
o->oop_iterate_no_header(&checkOop);
|
||||
}
|
||||
};
|
||||
|
||||
class VerifyRegionClosure: public HeapRegionClosure {
|
||||
private:
|
||||
bool _par;
|
||||
@ -2903,6 +3140,13 @@ public:
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
// For archive regions, verify there are no heap pointers to
|
||||
// non-pinned regions. For all others, verify liveness info.
|
||||
if (r->is_archive()) {
|
||||
VerifyArchiveRegionClosure verify_oop_pointers(r);
|
||||
r->object_iterate(&verify_oop_pointers);
|
||||
return true;
|
||||
}
|
||||
if (!r->is_continues_humongous()) {
|
||||
bool failures = false;
|
||||
r->verify(_vo, &failures);
|
||||
@ -3087,7 +3331,7 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
|
||||
switch (vo) {
|
||||
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
|
||||
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
|
||||
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
|
||||
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
return false; // keep some compilers happy
|
||||
@ -3098,7 +3342,10 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
|
||||
switch (vo) {
|
||||
case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
|
||||
case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
|
||||
case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
|
||||
case VerifyOption_G1UseMarkWord: {
|
||||
HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
|
||||
return !obj->is_gc_marked() && !hr->is_archive();
|
||||
}
|
||||
default: ShouldNotReachHere();
|
||||
}
|
||||
return false; // keep some compilers happy
|
||||
@ -3131,7 +3378,7 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
|
||||
st->cr();
|
||||
st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
|
||||
"HS=humongous(starts), HC=humongous(continues), "
|
||||
"CS=collection set, F=free, TS=gc time stamp, "
|
||||
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
|
||||
"PTAMS=previous top-at-mark-start, "
|
||||
"NTAMS=next top-at-mark-start)");
|
||||
PrintRegionClosure blk(st);
|
||||
@ -3233,6 +3480,28 @@ void G1CollectedHeap::print_all_rsets() {
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
|
||||
YoungList* young_list = heap()->young_list();
|
||||
|
||||
size_t eden_used_bytes = young_list->eden_used_bytes();
|
||||
size_t survivor_used_bytes = young_list->survivor_used_bytes();
|
||||
|
||||
size_t eden_capacity_bytes =
|
||||
(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
|
||||
|
||||
VirtualSpaceSummary heap_summary = create_heap_space_summary();
|
||||
return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
|
||||
const G1HeapSummary& heap_summary = create_g1_heap_summary();
|
||||
gc_tracer->report_gc_heap_summary(when, heap_summary);
|
||||
|
||||
const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
|
||||
gc_tracer->report_metaspace_summary(when, metaspace_summary);
|
||||
}
|
||||
|
||||
|
||||
G1CollectedHeap* G1CollectedHeap::heap() {
|
||||
CollectedHeap* heap = Universe::heap();
|
||||
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
|
||||
@ -3830,6 +4099,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
|
||||
if (evacuation_failed()) {
|
||||
_allocator->set_used(recalculate_used());
|
||||
if (_archive_allocator != NULL) {
|
||||
_archive_allocator->clear_used();
|
||||
}
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
if (_evacuation_failed_info_array[i].has_failed()) {
|
||||
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
|
||||
@ -6151,13 +6423,18 @@ public:
|
||||
assert(!r->is_young(), "we should not come across young regions");
|
||||
|
||||
if (r->is_humongous()) {
|
||||
// We ignore humongous regions, we left the humongous set unchanged
|
||||
// We ignore humongous regions. We left the humongous set unchanged.
|
||||
} else {
|
||||
// Objects that were compacted would have ended up on regions
|
||||
// that were previously old or free.
|
||||
// that were previously old or free. Archive regions (which are
|
||||
// old) will not have been touched.
|
||||
assert(r->is_free() || r->is_old(), "invariant");
|
||||
// We now consider them old, so register as such.
|
||||
r->set_old();
|
||||
// We now consider them old, so register as such. Leave
|
||||
// archive regions set that way, however, while still adding
|
||||
// them to the old set.
|
||||
if (!r->is_archive()) {
|
||||
r->set_old();
|
||||
}
|
||||
_old_set->add(r);
|
||||
}
|
||||
_total_used += r->used();
|
||||
@ -6183,6 +6460,9 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
|
||||
|
||||
if (!free_list_only) {
|
||||
_allocator->set_used(cl.total_used());
|
||||
if (_archive_allocator != NULL) {
|
||||
_archive_allocator->clear_used();
|
||||
}
|
||||
}
|
||||
assert(_allocator->used_unlocked() == recalculate_used(),
|
||||
err_msg("inconsistent _allocator->used_unlocked(), "
|
||||
@ -6283,6 +6563,25 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
|
||||
_hr_printer.retire(alloc_region);
|
||||
}
|
||||
|
||||
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
|
||||
bool expanded = false;
|
||||
uint index = _hrm.find_highest_free(&expanded);
|
||||
|
||||
if (index != G1_NO_HRM_INDEX) {
|
||||
if (expanded) {
|
||||
ergo_verbose1(ErgoHeapSizing,
|
||||
"attempt heap expansion",
|
||||
ergo_format_reason("requested address range outside heap bounds")
|
||||
ergo_format_byte("region size"),
|
||||
HeapRegion::GrainWords * HeapWordSize);
|
||||
}
|
||||
_hrm.allocate_free_regions_starting_at(index, 1);
|
||||
return region_at(index);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
// Heap region set verification
|
||||
|
||||
class VerifyRegionListsClosure : public HeapRegionClosure {
|
||||
@ -6319,6 +6618,9 @@ public:
|
||||
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
|
||||
_old_count.increment(1u, hr->capacity());
|
||||
} else {
|
||||
// There are no other valid region types. Check for one invalid
|
||||
// one we can identify: pinned without old or humongous set.
|
||||
assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
return false;
|
||||
|
@ -188,6 +188,7 @@ class G1CollectedHeap : public CollectedHeap {
|
||||
friend class SurvivorGCAllocRegion;
|
||||
friend class OldGCAllocRegion;
|
||||
friend class G1Allocator;
|
||||
friend class G1ArchiveAllocator;
|
||||
|
||||
// Closures used in implementation.
|
||||
friend class G1ParScanThreadState;
|
||||
@ -250,6 +251,9 @@ private:
|
||||
// Class that handles the different kinds of allocations.
|
||||
G1Allocator* _allocator;
|
||||
|
||||
// Class that handles archive allocation ranges.
|
||||
G1ArchiveAllocator* _archive_allocator;
|
||||
|
||||
// Statistics for each allocation context
|
||||
AllocationContextStats _allocation_context_stats;
|
||||
|
||||
@ -370,6 +374,8 @@ private:
|
||||
void log_gc_header();
|
||||
void log_gc_footer(double pause_time_sec);
|
||||
|
||||
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
|
||||
|
||||
// These are macros so that, if the assert fires, we get the correct
|
||||
// line number, file, etc.
|
||||
|
||||
@ -574,6 +580,10 @@ protected:
|
||||
void retire_gc_alloc_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes, InCSetState dest);
|
||||
|
||||
// Allocate the highest free region in the reserved heap. This will commit
|
||||
// regions as necessary.
|
||||
HeapRegion* alloc_highest_free_region();
|
||||
|
||||
// - if explicit_gc is true, the GC is for a System.gc() or a heap
|
||||
// inspection request and should collect the entire heap
|
||||
// - if clear_all_soft_refs is true, all soft references should be
|
||||
@ -729,6 +739,44 @@ public:
|
||||
void free_humongous_region(HeapRegion* hr,
|
||||
FreeRegionList* free_list,
|
||||
bool par);
|
||||
|
||||
// Facility for allocating in 'archive' regions in high heap memory and
|
||||
// recording the allocated ranges. These should all be called from the
|
||||
// VM thread at safepoints, without the heap lock held. They can be used
|
||||
// to create and archive a set of heap regions which can be mapped at the
|
||||
// same fixed addresses in a subsequent JVM invocation.
|
||||
void begin_archive_alloc_range();
|
||||
|
||||
// Check if the requested size would be too large for an archive allocation.
|
||||
bool is_archive_alloc_too_large(size_t word_size);
|
||||
|
||||
// Allocate memory of the requested size from the archive region. This will
|
||||
// return NULL if the size is too large or if no memory is available. It
|
||||
// does not trigger a garbage collection.
|
||||
HeapWord* archive_mem_allocate(size_t word_size);
|
||||
|
||||
// Optionally aligns the end address and returns the allocated ranges in
|
||||
// an array of MemRegions in order of ascending addresses.
|
||||
void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes = 0);
|
||||
|
||||
// Facility for allocating a fixed range within the heap and marking
|
||||
// the containing regions as 'archive'. For use at JVM init time, when the
|
||||
// caller may mmap archived heap data at the specified range(s).
|
||||
// Verify that the MemRegions specified in the argument array are within the
|
||||
// reserved heap.
|
||||
bool check_archive_addresses(MemRegion* range, size_t count);
|
||||
|
||||
// Commit the appropriate G1 regions containing the specified MemRegions
|
||||
// and mark them as 'archive' regions. The regions in the array must be
|
||||
// non-overlapping and in order of ascending address.
|
||||
bool alloc_archive_regions(MemRegion* range, size_t count);
|
||||
|
||||
// Insert any required filler objects in the G1 regions around the specified
|
||||
// ranges to make the regions parseable. This must be called after
|
||||
// alloc_archive_regions, and after class loading has occurred.
|
||||
void fill_archive_regions(MemRegion* range, size_t count);
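
  // [Editorial sketch, not from the patch] The intended dump-time call
  // sequence for the archive allocation facility declared above, assuming a
  // G1CollectedHeap* g1h on the VM thread at a safepoint; the driver helpers
  // (more_objects_to_archive, next_object_word_size, copy_object_to) are
  // hypothetical stand-ins:
  //
  //   GrowableArray<MemRegion>* ranges = new GrowableArray<MemRegion>(2);
  //   g1h->begin_archive_alloc_range();
  //   while (more_objects_to_archive()) {
  //     size_t word_size = next_object_word_size();
  //     if (g1h->is_archive_alloc_too_large(word_size)) break;
  //     HeapWord* p = g1h->archive_mem_allocate(word_size);
  //     if (p == NULL) break;            // no archive space left
  //     copy_object_to(p);
  //   }
  //   g1h->end_archive_alloc_range(ranges, os::vm_allocation_granularity());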

 protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
@ -1393,6 +1441,11 @@ public:
    return word_size > _humongous_object_threshold_in_words;
  }

  // Returns the humongous threshold for a specific region size
  static size_t humongous_threshold_for(size_t region_size) {
    return (region_size / 2);
  }
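
  // [Editorial note, not from the patch] Example: with a 1M region size,
  // humongous_threshold_for(1M) is 512K, so any allocation larger than half
  // a region is treated as humongous and is given its own region(s).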

  // Update mod union table with the set of dirty cards.
  void updateModUnion();

@ -1439,21 +1492,23 @@ public:

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.
  // iff a) it was not allocated since the last mark, b) it
  // is not marked, and c) it is not in an archive region.
  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
      !isMarkedPrev(obj) &&
      !hr->is_archive();
  }

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.
  // been marked during this marking, and is not in an archive region.
  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
      !isMarkedNext(obj) &&
      !hr->is_archive();
  }

  // Determine if an object is dead, given only the object itself.
@ -1548,6 +1603,8 @@ public:
  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const;

  G1HeapSummary create_g1_heap_summary();

  // Printing

  virtual void print_on(outputStream* st) const;

@ -190,7 +190,7 @@ public:
    bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->collector_state()->mark_in_progress();

    assert(!hr->is_humongous(), "sanity");
    assert(!hr->is_pinned(), err_msg("Unexpected pinned region at index %u", hr->hrm_index()));
    assert(hr->in_collection_set(), "bad CS");

    if (_hrclaimer->claim_region(hr->hrm_index())) {

@ -54,6 +54,7 @@ const char* G1HRPrinter::region_type_name(RegionType type) {
    case SingleHumongous:    return "SingleH";
    case StartsHumongous:    return "StartsH";
    case ContinuesHumongous: return "ContinuesH";
    case Archive:            return "Archive";
    default:                 ShouldNotReachHere();
  }
  // trying to keep the Windows compiler happy

@ -52,7 +52,8 @@ public:
    Old,
    SingleHumongous,
    StartsHumongous,
    ContinuesHumongous
    ContinuesHumongous,
    Archive
  } RegionType;

  typedef enum {

@ -57,6 +57,9 @@

class HeapRegion;

bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
@ -212,7 +215,7 @@ class G1AdjustPointersClosure: public HeapRegionClosure {
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else {
    } else if (!r->is_pinned()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
@ -275,7 +278,7 @@ public:
        }
        hr->reset_during_compaction();
      }
    } else {
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
@ -298,6 +301,26 @@ void G1MarkSweep::mark_sweep_phase4() {

}

void G1MarkSweep::enable_archive_object_check() {
  assert(!_archive_check_enabled, "archive range check already enabled");
  _archive_check_enabled = true;
  size_t length = Universe::heap()->max_capacity();
  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                 (HeapWord*)Universe::heap()->base() + length,
                                 HeapRegion::GrainBytes);
}

void G1MarkSweep::mark_range_archive(MemRegion range) {
  assert(_archive_check_enabled, "archive range check not enabled");
  _archive_region_map.set_by_address(range, true);
}

bool G1MarkSweep::in_archive_range(oop object) {
  // This is the out-of-line part of is_archive_object test, done separately
  // to avoid additional performance impact when the check is not enabled.
  return _archive_region_map.get_by_address((HeapWord*)object);
}

void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
@ -357,7 +380,7 @@ bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
    } else {
      assert(hr->is_continues_humongous(), "Invalid humongous.");
    }
  } else {
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;

@ -44,6 +44,7 @@ class ReferenceProcessor;
//
// Class unloading will only occur when a full gc is invoked.
class G1PrepareCompactClosure;
class G1ArchiveRegionMap;

class G1MarkSweep : AllStatic {
 public:
@ -54,7 +55,22 @@ class G1MarkSweep : AllStatic {
  static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
  static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }

  // Create the _archive_region_map which is used to identify archive objects.
  static void enable_archive_object_check();

  // Mark the regions containing the specified address range as archive regions.
  static void mark_range_archive(MemRegion range);

  // Check if an object is in an archive region using the _archive_region_map.
  static bool in_archive_range(oop object);

  // Check if archive object checking is enabled, to avoid calling in_archive_range
  // unnecessarily.
  static bool archive_check_enabled() { return G1MarkSweep::_archive_check_enabled; }

 private:
  static bool _archive_check_enabled;
  static G1ArchiveRegionMap _archive_region_map;

  // Mark live objects
  static void mark_sweep_phase1(bool& marked_for_deopt,
@ -93,4 +109,12 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion* hr);
};

// G1ArchiveRegionMap is a boolean array used to mark G1 regions as
// archive regions. This allows a quick check for whether an object
// should not be marked because it is in an archive region.
class G1ArchiveRegionMap : public G1BiasedMappedArray<bool> {
 protected:
  bool default_value() const { return false; }
};
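
// [Editorial sketch, not from the patch] The lookup idea behind
// G1BiasedMappedArray<bool> as used here, assuming a flat bool array biased
// by the heap base passed to initialize() in enable_archive_object_check():
// an address maps to its slot by subtracting the base and dividing by the
// mapping granularity (HeapRegion::GrainBytes), so in_archive_range() costs
// roughly one load:
//
//   size_t index = ((uintptr_t)obj - (uintptr_t)base) / HeapRegion::GrainBytes;
//   bool in_archive = map[index];
//
// mark_range_archive() uses set_by_address() to fill every slot whose
// region intersects the given range.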

#endif // SHARE_VM_GC_G1_G1MARKSWEEP_HPP

@ -23,12 +23,14 @@
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupQueue.hpp"
#include "gc/g1/g1StringDedupTable.hpp"
#include "gc/g1/g1StringDedupThread.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"

G1StringDedupThread* G1StringDedupThread::_thread = NULL;
@ -55,11 +57,36 @@ G1StringDedupThread* G1StringDedupThread::thread() {
  return _thread;
}

class G1StringDedupSharedClosure: public OopClosure {
 private:
  G1StringDedupStat& _stat;

 public:
  G1StringDedupSharedClosure(G1StringDedupStat& stat) : _stat(stat) {}

  virtual void do_oop(oop* p) { ShouldNotReachHere(); }
  virtual void do_oop(narrowOop* p) {
    oop java_string = oopDesc::load_decode_heap_oop(p);
    G1StringDedupTable::deduplicate(java_string, _stat);
  }
};
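
// [Editorial note, not from the patch] The closure above deliberately aborts
// on uncompressed oops (do_oop(oop*) is ShouldNotReachHere()): shared string
// regions are only mapped when UseCompressedOops is on (see
// FileMapInfo::map_string_regions() later in this change), so the entries
// reached via StringTable::shared_oops_do() can be assumed to be narrowOops.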

// The CDS archive does not include the string deduplication table. Only the
// string table is saved in the archive. The shared strings from the CDS
// archive need to be added to the string deduplication table before
// deduplication occurs. That is done at the beginning of the
// G1StringDedupThread (see G1StringDedupThread::run() below).
void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
  G1StringDedupSharedClosure sharedStringDedup(stat);
  StringTable::shared_oops_do(&sharedStringDedup);
}

void G1StringDedupThread::run() {
  G1StringDedupStat total_stat;

  initialize_in_thread();
  wait_for_universe_init();
  deduplicate_shared_strings(total_stat);

  // Main loop
  for (;;) {

@ -52,6 +52,8 @@ public:
  static G1StringDedupThread* thread();

  virtual void run();

  void deduplicate_shared_strings(G1StringDedupStat& stat);
};

#endif // SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP

@ -103,6 +103,10 @@ size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
@ -716,7 +720,7 @@ public:
        HeapRegion* to = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->is_humongous()) {
            !to->is_pinned()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

@ -331,6 +331,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
  }

  static size_t max_region_size();
  static size_t min_region_size_in_words();

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
@ -417,6 +418,15 @@ class HeapRegion: public G1OffsetTableContigSpace {

  bool is_old() const { return _type.is_old(); }

  // A pinned region contains objects which are not moved by garbage collections.
  // Humongous regions and archive regions are pinned.
  bool is_pinned() const { return _type.is_pinned(); }

  // An archive region is a pinned region, also tagged as old, which
  // should not be marked during mark/sweep. This allows the address
  // space to be shared by JVM instances.
  bool is_archive() const { return _type.is_archive(); }

  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
@ -670,6 +680,8 @@ class HeapRegion: public G1OffsetTableContigSpace {

  void set_old() { _type.set_old(); }

  void set_archive() { _type.set_archive(); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.

@ -278,6 +278,55 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
  return num_regions;
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand_at that index.
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion *hr = _regions.get_by_index(curr);
    if (hr == NULL) {
      uint res = expand_at(curr, 1);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}
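
// [Editorial note, not from the patch] Example with assumed 1M regions: a
// 2.5M range whose start falls in region 7 has its last byte in region 9,
// so regions 7..9 are each committed if not yet available (counted in
// *commit_count) and must all be free; otherwise the function returns false
// without allocating anything.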

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
  const uint start_index = hrclaimer->start_region_for_worker(worker_id);

@ -221,6 +221,16 @@ public:

  HeapRegion* next_region_in_heap(const HeapRegion* r) const;

  // Find the highest free or uncommitted region in the reserved heap,
  // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX.
  // Set the 'expanded' boolean true if a new region was committed.
  uint find_highest_free(bool* expanded);

  // Allocate the regions that contain the address range specified, committing the
  // regions if necessary. Return false if any of the regions is already committed
  // and not free, and return the number of regions newly committed in commit_count.
  bool allocate_containing_regions(MemRegion range, size_t* commit_count);

  // Apply blk->doHeapRegion() on all committed regions in address order,
  // terminating the iteration early if doHeapRegion() returns true.
  void iterate(HeapRegionClosure* blk) const;

@ -42,7 +42,8 @@ void HeapRegionSetBase::verify_region(HeapRegion* hr) {
  assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
  assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
  assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
  assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
  assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
         err_msg("Empty region %u is not free or archive for set %s", hr->hrm_index(), name()));
  assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
}
#endif

@ -33,6 +33,7 @@ bool HeapRegionType::is_valid(Tag tag) {
    case StartsHumongousTag:
    case ContinuesHumongousTag:
    case OldTag:
    case ArchiveTag:
      return true;
  }
  return false;
@ -47,6 +48,7 @@ const char* HeapRegionType::get_str() const {
    case StartsHumongousTag:    return "HUMS";
    case ContinuesHumongousTag: return "HUMC";
    case OldTag:                return "OLD";
    case ArchiveTag:            return "ARC";
  }
  ShouldNotReachHere();
  // keep some compilers happy
@ -62,6 +64,7 @@ const char* HeapRegionType::get_short_str() const {
    case StartsHumongousTag:    return "HS";
    case ContinuesHumongousTag: return "HC";
    case OldTag:                return "O";
    case ArchiveTag:            return "A";
  }
  ShouldNotReachHere();
  // keep some compilers happy

@ -44,15 +44,18 @@ private:
  //
  // 0000 0 [ 0] Free
  //
  // 0001 0      Young Mask
  // 0001 0 [ 2] Young Mask
  // 0001 0 [ 2] Eden
  // 0001 1 [ 3] Survivor
  //
  // 0010 0      Humongous Mask
  // 0010 0 [ 4] Starts Humongous
  // 0010 1 [ 5] Continues Humongous
  // 0010 0 [ 4] Humongous Mask
  // 0100 0 [ 8] Pinned Mask
  // 0110 0 [12] Starts Humongous
  // 0110 1 [13] Continues Humongous
  //
  // 01000 [ 8] Old
  // 1000 0 [16] Old Mask
  //
  // 1100 0 [24] Archive
  typedef enum {
    FreeTag = 0,

@ -61,10 +64,14 @@ private:
    SurvTag = YoungMask + 1,

    HumongousMask = 4,
    StartsHumongousTag = HumongousMask,
    ContinuesHumongousTag = HumongousMask + 1,
    PinnedMask = 8,
    StartsHumongousTag = HumongousMask | PinnedMask,
    ContinuesHumongousTag = HumongousMask | PinnedMask + 1,

    OldTag = 8
    OldMask = 16,
    OldTag = OldMask,

    ArchiveTag = PinnedMask | OldMask
  } Tag;

  volatile Tag _tag;
@ -108,7 +115,13 @@ public:
  bool is_starts_humongous() const { return get() == StartsHumongousTag; }
  bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }

  bool is_old() const { return get() == OldTag; }
  bool is_archive() const { return get() == ArchiveTag; }

  // is_old regions may or may not also be pinned
  bool is_old() const { return (get() & OldMask) != 0; }

  // is_pinned regions may be archive or humongous
  bool is_pinned() const { return (get() & PinnedMask) != 0; }
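
  // [Editorial note, not from the patch] With the new encoding the tag
  // values work out as (illustrative arithmetic; note that + binds tighter
  // than | in the ContinuesHumongousTag definition):
  //   StartsHumongousTag    = HumongousMask | PinnedMask       = 4 | 8  = 12
  //   ContinuesHumongousTag = HumongousMask | (PinnedMask + 1) = 4 | 9  = 13
  //   ArchiveTag            = PinnedMask | OldMask             = 8 | 16 = 24
  // so is_pinned() holds for 12, 13 and 24, while is_old() holds for 16
  // (Old) and 24 (Archive), matching the mask-based getters above.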

  // Setters

@ -123,6 +136,8 @@ public:

  void set_old() { set(OldTag); }

  void set_archive() { set_from(ArchiveTag, FreeTag); }

  // Misc

  const char* get_str() const;

@ -313,7 +313,7 @@ void MarkSweep::restore_marks() {

MarkSweep::IsAliveClosure   MarkSweep::is_alive;

bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked() || is_archive_object(p); }

MarkSweep::KeepAliveClosure MarkSweep::keep_alive;

@ -147,6 +147,9 @@ class MarkSweep : AllStatic {
  // Reference Processing
  static ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Archive Object handling
  static inline bool is_archive_object(oop object);

  static STWGCTimer* gc_timer() { return _gc_timer; }
  static SerialOldTracer* gc_tracer() { return _gc_tracer; }

@ -37,6 +37,7 @@
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#endif // INCLUDE_ALL_GCS

inline void MarkSweep::mark_object(oop obj) {
@ -57,6 +58,15 @@ inline void MarkSweep::mark_object(oop obj) {
  }
}

inline bool MarkSweep::is_archive_object(oop object) {
#if INCLUDE_ALL_GCS
  return (G1MarkSweep::archive_check_enabled() &&
          G1MarkSweep::in_archive_range(object));
#else
  return false;
#endif
}

inline void MarkSweep::follow_klass(Klass* klass) {
  oop op = klass->klass_holder();
  MarkSweep::mark_and_push(&op);
@ -74,7 +84,8 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (!obj->mark()->is_marked()) {
    if (!obj->mark()->is_marked() &&
        !is_archive_object(obj)) {
      mark_object(obj);
      follow_object(obj);
    }
@ -87,7 +98,8 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (!obj->mark()->is_marked()) {
    if (!obj->mark()->is_marked() &&
        !is_archive_object(obj)) {
      mark_object(obj);
      _marking_stack.push(obj);
    }
@ -111,15 +123,18 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
    assert(Universe::heap()->is_in(obj), "should be in heap");

    oop new_obj = oop(obj->mark()->decode_pointer());
    assert(new_obj != NULL ||                          // is forwarding ptr?
    assert(is_archive_object(obj) ||                   // no forwarding of archive objects
           new_obj != NULL ||                          // is forwarding ptr?
           obj->mark() == markOopDesc::prototype() ||  // not gc marked?
           (UseBiasedLocking && obj->mark()->has_bias_pattern()),
                                                       // not gc marked?
                                                       // not gc marked?
           "should be forwarded");
    if (new_obj != NULL) {
      assert(Universe::heap()->is_in_reserved(new_obj),
             "should be in object space");
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
      if (!is_archive_object(obj)) {
        assert(Universe::heap()->is_in_reserved(new_obj),
               "should be in object space");
        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
      }
    }
  }
}

@ -88,9 +88,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
@ -102,6 +99,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  BarrierSet* _barrier_set;
  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)

@ -78,11 +78,13 @@ class MetaspaceSizes : public StackObj {

class GCHeapSummary;
class PSHeapSummary;
class G1HeapSummary;

class GCHeapSummaryVisitor {
 public:
  virtual void visit(const GCHeapSummary* heap_summary) const = 0;
  virtual void visit(const PSHeapSummary* heap_summary) const {}
  virtual void visit(const G1HeapSummary* heap_summary) const {}
};

class GCHeapSummary : public StackObj {
@ -125,6 +127,22 @@ class PSHeapSummary : public GCHeapSummary {
  }
};

class G1HeapSummary : public GCHeapSummary {
  size_t _edenUsed;
  size_t _edenCapacity;
  size_t _survivorUsed;
 public:
  G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed) :
    GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed) { }
  const size_t edenUsed() const { return _edenUsed; }
  const size_t edenCapacity() const { return _edenCapacity; }
  const size_t survivorUsed() const { return _survivorUsed; }

  virtual void accept(GCHeapSummaryVisitor* visitor) const {
    visitor->visit(this);
  }
};
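
// [Editorial note, not from the patch] G1HeapSummary slots into the existing
// visitor double dispatch: callers hold only a GCHeapSummary* and call
// accept(visitor); the virtual accept() above re-invokes the visitor with
// the static type G1HeapSummary*, which selects the G1-specific visit()
// overload (such as the one added to GCHeapSummaryEventSender below), while
// visitors that don't override it fall back to the empty default in
// GCHeapSummaryVisitor.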

class MetaspaceSummary : public StackObj {
  size_t _capacity_until_GC;
  MetaspaceSizes _meta_space;

@ -44,6 +44,7 @@ class GCHeapSummary;
class MetaspaceChunkFreeListSummary;
class MetaspaceSummary;
class PSHeapSummary;
class G1HeapSummary;
class ReferenceProcessorStats;
class TimePartitions;
class BoolObjectClosure;

@ -263,6 +263,20 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
    }
  }

  void visit(const G1HeapSummary* g1_heap_summary) const {
    visit((GCHeapSummary*)g1_heap_summary);

    EventG1HeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);
      e.set_edenUsedSize(g1_heap_summary->edenUsed());
      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
      e.commit();
    }
  }

  void visit(const PSHeapSummary* ps_heap_summary) const {
    visit((GCHeapSummary*)ps_heap_summary);

@ -28,6 +28,9 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#endif
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
@ -166,6 +169,9 @@ void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment
  _version = _current_version;
  _alignment = alignment;
  _obj_alignment = ObjectAlignmentInBytes;
  _narrow_oop_mode = Universe::narrow_oop_mode();
  _narrow_oop_shift = Universe::narrow_oop_shift();
  _max_heap_size = MaxHeapSize;
  _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
  _classpath_entry_table = mapinfo->_classpath_entry_table;
  _classpath_entry_size = mapinfo->_classpath_entry_size;
@ -441,7 +447,16 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
  } else {
    si->_file_offset = _file_offset;
  }
  si->_base = base;
  if (MetaspaceShared::is_string_region(region)) {
    assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
    if (base != NULL) {
      si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
    } else {
      si->_addr._offset = 0;
    }
  } else {
    si->_addr._base = base;
  }
  si->_used = size;
  si->_capacity = capacity;
  si->_read_only = read_only;
@ -450,6 +465,38 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
  write_bytes_aligned(base, (int)size);
}

// Write the string space. The string space contains one or multiple GC(G1)
// regions. When the total string space size is smaller than one GC region at
// dump time, only one string region is used for shared strings.
//
// If the total string space size is bigger than one GC region, more than one
// GC region is allocated for shared strings. The first/bottom GC region might
// be a partial GC region with the empty portion at the higher address within
// that region. The non-empty portion of the first region is written into the
// archive as one string region. The rest are consecutive full GC regions if
// they exist, which can be written out in one chunk as another string region.
void FileMapInfo::write_string_regions(GrowableArray<MemRegion> *regions) {
  for (int i = MetaspaceShared::first_string;
       i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
    char* start = NULL;
    size_t size = 0;
    if (regions->is_nonempty()) {
      if (i == MetaspaceShared::first_string) {
        MemRegion first = regions->first();
        start = (char*)first.start();
        size = first.byte_size();
      } else {
        int len = regions->length();
        if (len > 1) {
          start = (char*)regions->at(1).start();
          size = (char*)regions->at(len - 1).end() - start;
        }
      }
    }
    write_region(i, start, size, size, false, false);
  }
}
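
// [Editorial note, not from the patch] Example of the layout described in
// the comment above, assuming 1M GC regions: with 2.5M of shared strings,
// the partially filled bottom GC region (0.5M used) is written as the first
// string region ("String1"), and the two full regions above it, being
// contiguous, are written together as the second ("String2"). With under 1M
// of strings only the first region is non-empty and the second is written
// with size 0.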

// Dump bytes to file -- at the current file position.

@ -514,7 +561,8 @@ void FileMapInfo::close() {
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
  int idx = 0;
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[idx];
  if (!si->_read_only) {
    // the space is already readwrite so we are done
    return true;
@ -524,15 +572,16 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
  if (!open_for_read()) {
    return false;
  }
  char *addr = _header->region_addr(idx);
  char *base = os::remap_memory(_fd, _full_path, si->_file_offset,
                                si->_base, size, false /* !read_only */,
                                addr, size, false /* !read_only */,
                                si->_allow_exec);
  close();
  if (base == NULL) {
    fail_continue("Unable to remap shared readonly space (errno=%d).", errno);
    return false;
  }
  if (base != si->_base) {
  if (base != addr) {
    fail_continue("Unable to remap shared readonly space at required address.");
    return false;
  }
@ -543,7 +592,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
  char* requested_addr = si->_base;
  char* requested_addr = _header->region_addr(0);

  size_t size = FileMapInfo::shared_spaces_size();

@ -561,14 +610,16 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
}

// Memory map a region in the address space.
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode",
                                            "String1", "String2" };

char* FileMapInfo::map_region(int i) {
  assert(!MetaspaceShared::is_string_region(i), "sanity");
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
  size_t used = si->_used;
  size_t alignment = os::vm_allocation_granularity();
  size_t size = align_size_up(used, alignment);
  char *requested_addr = si->_base;
  char *requested_addr = _header->region_addr(i);
  bool read_only;

  // If a tool agent is in use (debugging enabled), we must map the address space RW
@ -583,7 +634,7 @@ char* FileMapInfo::map_region(int i) {
  char *base = os::map_memory(_fd, _full_path, si->_file_offset,
                              requested_addr, size, read_only,
                              si->_allow_exec);
  if (base == NULL || base != si->_base) {
  if (base == NULL || base != requested_addr) {
    fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
    return NULL;
  }
@ -592,15 +643,119 @@ char* FileMapInfo::map_region(int i) {
  // in method FileMapInfo::reserve_shared_memory(), which is not called on Windows.
  MemTracker::record_virtual_memory_type((address)base, mtClassShared);
#endif

  return base;
}

MemRegion *string_ranges = NULL;
int num_ranges = 0;
bool FileMapInfo::map_string_regions() {
#if INCLUDE_ALL_GCS
  if (UseG1GC && UseCompressedOops && UseCompressedClassPointers) {
    if (narrow_oop_mode() == Universe::narrow_oop_mode() &&
        narrow_oop_shift() == Universe::narrow_oop_shift()) {
      string_ranges = new MemRegion[MetaspaceShared::max_strings];
      struct FileMapInfo::FileMapHeader::space_info* si;

      for (int i = MetaspaceShared::first_string;
           i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
        si = &_header->_space[i];
        size_t used = si->_used;
        if (used > 0) {
          size_t size = used;
          char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
                                                 (narrowOop)si->_addr._offset));
          string_ranges[num_ranges] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
          num_ranges++;
        }
      }

      if (num_ranges == 0) {
        return true; // no shared string data
      }

      // Check that ranges are within the java heap
      if (!G1CollectedHeap::heap()->check_archive_addresses(string_ranges, num_ranges)) {
        fail_continue("Unable to allocate shared string space: range is not "
                      "within java heap.");
        return false;
      }

      // allocate from java heap
      if (!G1CollectedHeap::heap()->alloc_archive_regions(string_ranges, num_ranges)) {
        fail_continue("Unable to allocate shared string space: range is "
                      "already in use.");
        return false;
      }

      // Map the string data. No need to call MemTracker::record_virtual_memory_type()
      // for mapped string regions as they are part of the reserved java heap, which
      // is already recorded.
      for (int i = 0; i < num_ranges; i++) {
        si = &_header->_space[MetaspaceShared::first_string + i];
        char* addr = (char*)string_ranges[i].start();
        char* base = os::map_memory(_fd, _full_path, si->_file_offset,
                                    addr, string_ranges[i].byte_size(), si->_read_only,
                                    si->_allow_exec);
        if (base == NULL || base != addr) {
          fail_continue("Unable to map shared string space at required address.");
          return false;
        }
      }
      return true; // the shared string data is mapped successfully
    } else {
      // the narrow oop encoding differs, so the shared string data is not used
      if (PrintSharedSpaces && _header->_space[MetaspaceShared::first_string]._used > 0) {
        tty->print_cr("Shared string data from the CDS archive is being ignored. "
                      "The current CompressedOops encoding differs from that archived "
                      "due to heap size change. The archive was dumped using max heap "
                      "size %dM.", max_heap_size() >> 20);
      }
    }
  } else {
    if (PrintSharedSpaces && _header->_space[MetaspaceShared::first_string]._used > 0) {
      tty->print_cr("Shared string data from the CDS archive is being ignored. UseG1GC, "
                    "UseCompressedOops and UseCompressedClassPointers are required.");
    }
  }

  // if we get here, the shared string data is not mapped
  assert(string_ranges == NULL && num_ranges == 0, "sanity");
  StringTable::ignore_shared_strings(true);
#endif
  return true;
}

bool FileMapInfo::verify_string_regions() {
  for (int i = MetaspaceShared::first_string;
       i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
    if (!verify_region_checksum(i)) {
      return false;
    }
  }
  return true;
}

void FileMapInfo::fixup_string_regions() {
  if (string_ranges != NULL) {
    G1CollectedHeap::heap()->fill_archive_regions(string_ranges, num_ranges);
  }
}

bool FileMapInfo::verify_region_checksum(int i) {
  if (!VerifySharedSpaces) {
    return true;
  }
  const char* buf = _header->_space[i]._base;

  size_t sz = _header->_space[i]._used;

  if (sz == 0) {
    return true; // no data
  }
  if (MetaspaceShared::is_string_region(i) && StringTable::shared_string_ignored()) {
    return true; // shared string data is not mapped
  }
  const char* buf = _header->region_addr(i);
  int crc = ClassLoader::crc32(0, buf, (jint)sz);
  if (crc != _header->_space[i]._crc) {
    fail_continue("Checksum verification failed.");
@ -612,14 +767,36 @@ bool FileMapInfo::verify_region_checksum(int i) {
// Unmap a memory region in the address space.

void FileMapInfo::unmap_region(int i) {
  assert(!MetaspaceShared::is_string_region(i), "sanity");
  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
  size_t used = si->_used;
  size_t size = align_size_up(used, os::vm_allocation_granularity());
  if (!os::unmap_memory(si->_base, size)) {

  if (used == 0) {
    return;
  }

  char* addr = _header->region_addr(i);
  if (!os::unmap_memory(addr, size)) {
    fail_stop("Unable to unmap shared space.");
  }
}

void FileMapInfo::unmap_string_regions() {
  for (int i = MetaspaceShared::first_string;
       i < MetaspaceShared::first_string + MetaspaceShared::max_strings; i++) {
    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
    size_t used = si->_used;
    if (used > 0) {
      size_t size = align_size_up(used, os::vm_allocation_granularity());
      char* addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
                                  (narrowOop)si->_addr._offset));
      if (!os::unmap_memory(addr, size)) {
        fail_stop("Unable to unmap shared space.");
      }
    }
  }
}

void FileMapInfo::assert_mark(bool check) {
  if (!check) {
@ -663,6 +840,15 @@ bool FileMapInfo::initialize() {
  return true;
}

char* FileMapInfo::FileMapHeader::region_addr(int idx) {
  if (MetaspaceShared::is_string_region(idx)) {
    return (char*)((void*)oopDesc::decode_heap_oop_not_null(
                          (narrowOop)_space[idx]._addr._offset));
  } else {
    return _space[idx]._addr._base;
  }
}
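
// [Editorial note, not from the patch] For string regions the header stores
// a compressed-oop value instead of a raw pointer, so the recorded address
// stays meaningful as long as the narrow-oop encoding at restore time
// matches the one used at dump time. The round trip, taken from the patch
// itself, is essentially:
//
//   si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);  // dump
//   char* addr = (char*)(void*)oopDesc::decode_heap_oop_not_null(
//                       (narrowOop)si->_addr._offset);                       // restore
//
// which is also why map_string_regions() bails out when narrow_oop_mode()
// or narrow_oop_shift() differ from the current runtime's values.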

int FileMapInfo::FileMapHeader::compute_crc() {
  char* header = data();
  // start computing from the field after _crc
@ -734,8 +920,12 @@ bool FileMapInfo::validate_header() {
// True if p is within the mapped shared space, otherwise false.
bool FileMapInfo::is_in_shared_space(const void* p) {
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    if (p >= _header->_space[i]._base &&
        p < _header->_space[i]._base + _header->_space[i]._used) {
    char *base;
    if (MetaspaceShared::is_string_region(i) && _header->_space[i]._used == 0) {
      continue;
    }
    base = _header->region_addr(i);
    if (p >= base && p < base + _header->_space[i]._used) {
      return true;
    }
  }
@ -747,9 +937,10 @@ void FileMapInfo::print_shared_spaces() {
  gclog_or_tty->print_cr("Shared Spaces:");
  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
    char *base = _header->region_addr(i);
    gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
                        shared_region_name[i],
                        si->_base, si->_base + si->_used);
                        base, base + si->_used);
  }
}

@ -758,12 +949,14 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
  FileMapInfo *map_info = FileMapInfo::current_info();
  if (map_info) {
    map_info->fail_continue("%s", msg);
    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
      if (map_info->_header->_space[i]._base != NULL) {
    for (int i = 0; i < MetaspaceShared::num_non_strings; i++) {
      char *addr = map_info->_header->region_addr(i);
      if (addr != NULL && !MetaspaceShared::is_string_region(i)) {
        map_info->unmap_region(i);
        map_info->_header->_space[i]._base = NULL;
        map_info->_header->_space[i]._addr._base = NULL;
      }
    }
    map_info->unmap_string_regions();
  } else if (DumpSharedSpaces) {
    fail_stop("%s", msg);
  }

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -94,11 +94,18 @@ public:
  int    _version;       // (from enum, above.)
  size_t _alignment;     // how shared archive should be aligned
  int    _obj_alignment; // value of ObjectAlignmentInBytes
  int    _narrow_oop_shift; // compressed oop encoding shift
  uintx  _max_heap_size;    // java max heap size during dumping
  Universe::NARROW_OOP_MODE _narrow_oop_mode; // compressed oop encoding mode

  struct space_info {
    int    _crc;         // crc checksum of the current space
    size_t _file_offset; // sizeof(this) rounded to vm page size
    char*  _base;        // copy-on-write base address
    union {
      char*  _base;      // copy-on-write base address
      intx   _offset;    // offset from the compressed oop encoding base, only used
                         // by string space
    } _addr;
    size_t _capacity;    // for validity checking
    size_t _used;        // for setting space top on read
    bool   _read_only;   // read only space?
@ -138,6 +145,8 @@ public:
  size_t _classpath_entry_size;
  SharedClassPathEntry* _classpath_entry_table;

  char* region_addr(int idx);

  virtual bool validate();
  virtual void populate(FileMapInfo* info, size_t alignment);
  int compute_crc();
@ -166,8 +175,10 @@ public:
  void   invalidate();
  int    version()       { return _header->_version; }
  size_t alignment()     { return _header->_alignment; }
  Universe::NARROW_OOP_MODE narrow_oop_mode() { return _header->_narrow_oop_mode; }
  int    narrow_oop_shift() { return _header->_narrow_oop_shift; }
  uintx  max_heap_size()    { return _header->_max_heap_size; }
  size_t space_capacity(int i) { return _header->_space[i]._capacity; }
  char*  region_base(int i)    { return _header->_space[i]._base; }
  struct FileMapHeader* header() { return _header; }

  static FileMapInfo* current_info() {
@ -185,10 +196,15 @@ public:
  void  write_space(int i, Metaspace* space, bool read_only);
  void  write_region(int region, char* base, size_t size,
                     size_t capacity, bool read_only, bool allow_exec);
  void  write_string_regions(GrowableArray<MemRegion> *regions);
  void  write_bytes(const void* buffer, int count);
  void  write_bytes_aligned(const void* buffer, int count);
  char* map_region(int i);
  bool  map_string_regions();
  bool  verify_string_regions();
  void  fixup_string_regions();
  void  unmap_region(int i);
  void  unmap_string_regions();
  bool  verify_region_checksum(int i);
  void  close();
  bool  is_open() { return _file_open; }

@ -3307,7 +3307,7 @@ void Metaspace::global_initialize() {
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->region_base(0);
        cds_address = (address)mapinfo->header()->region_addr(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");

@ -422,6 +422,8 @@ private:
  GrowableArray<Klass*> *_class_promote_order;
  VirtualSpace _md_vs;
  VirtualSpace _mc_vs;
  CompactHashtableWriter* _string_cht;
  GrowableArray<MemRegion> *_string_regions;

 public:
  VM_PopulateDumpSharedSpace(ClassLoaderData* loader_data,
@ -540,7 +542,7 @@ void VM_PopulateDumpSharedSpace::doit() {

  NOT_PRODUCT(SystemDictionary::verify();)

  // Copy the symbol table, and the system dictionary to the shared
  // Copy the symbol table, string table, and the system dictionary to the shared
  // space in usable form.  Copy the hashtable
  // buckets first [read-write], then copy the linked lists of entries
  // [read-only].
@ -548,6 +550,15 @@ void VM_PopulateDumpSharedSpace::doit() {
  NOT_PRODUCT(SymbolTable::verify());
  handle_misc_data_space_failure(SymbolTable::copy_compact_table(&md_top, md_end));

  size_t ss_bytes = 0;
  char* ss_low;
  // The string space has at most two regions. See FileMapInfo::write_string_regions() for details.
  _string_regions = new GrowableArray<MemRegion>(2);
  NOT_PRODUCT(StringTable::verify());
  handle_misc_data_space_failure(StringTable::copy_compact_table(&md_top, md_end, _string_regions,
                                                                 &ss_bytes));
  ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();

  SystemDictionary::reverse();
  SystemDictionary::copy_buckets(&md_top, md_end);

@ -576,7 +587,8 @@ void VM_PopulateDumpSharedSpace::doit() {
  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
  const size_t md_alloced = md_end-md_low;
  const size_t mc_alloced = mc_end-mc_low;
  const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
  const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced
                             + ss_bytes;

  // Occupied size of each space.
  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
@ -585,11 +597,12 @@ void VM_PopulateDumpSharedSpace::doit() {
  const size_t mc_bytes = size_t(mc_top - mc_low);

  // Percent of total size
  const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes;
  const size_t total_bytes = ro_bytes + rw_bytes + md_bytes + mc_bytes + ss_bytes;
  const double ro_t_perc = ro_bytes / double(total_bytes) * 100.0;
  const double rw_t_perc = rw_bytes / double(total_bytes) * 100.0;
  const double md_t_perc = md_bytes / double(total_bytes) * 100.0;
  const double mc_t_perc = mc_bytes / double(total_bytes) * 100.0;
  const double ss_t_perc = ss_bytes / double(total_bytes) * 100.0;

  // Percent of fullness of each space
  const double ro_u_perc = ro_bytes / double(ro_alloced) * 100.0;
@ -602,6 +615,7 @@ void VM_PopulateDumpSharedSpace::doit() {
  tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, rw_space->bottom());
  tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, md_low);
  tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, mc_low);
  tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes, 100.0, ss_low);
  tty->print_cr("total   : %9d [100.0%% of total] out of %9d bytes [%4.1f%% used]",
                total_bytes, total_alloced, total_u_perc);

@ -631,6 +645,7 @@ void VM_PopulateDumpSharedSpace::doit() {
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);
  mapinfo->write_string_regions(_string_regions);

  // Pass 2 - write data.
  mapinfo->open_for_write();
@ -646,6 +661,8 @@ void VM_PopulateDumpSharedSpace::doit() {
                        pointer_delta(mc_top, _mc_vs.low(), sizeof(char)),
                        SharedMiscCodeSize,
                        true, true);
  mapinfo->write_string_regions(_string_regions);

  mapinfo->close();

  memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
@ -942,6 +959,11 @@ bool MetaspaceShared::is_in_shared_space(const void* p) {
  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
}

bool MetaspaceShared::is_string_region(int idx) {
  return (idx >= MetaspaceShared::first_string &&
          idx < MetaspaceShared::first_string + MetaspaceShared::max_strings);
}

void MetaspaceShared::print_shared_spaces() {
  if (UseSharedSpaces) {
    FileMapInfo::current_info()->print_shared_spaces();
@ -972,13 +994,15 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {

  // Map each shared region
  if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
      mapinfo->verify_region_checksum(ro) &&
      mapinfo->verify_region_checksum(ro) &&
      (_rw_base = mapinfo->map_region(rw)) != NULL &&
      mapinfo->verify_region_checksum(rw) &&
      mapinfo->verify_region_checksum(rw) &&
      (_md_base = mapinfo->map_region(md)) != NULL &&
      mapinfo->verify_region_checksum(md) &&
      mapinfo->verify_region_checksum(md) &&
      (_mc_base = mapinfo->map_region(mc)) != NULL &&
      mapinfo->verify_region_checksum(mc) &&
      mapinfo->verify_region_checksum(mc) &&
      mapinfo->map_string_regions() &&
      mapinfo->verify_string_regions() &&
      (image_alignment == (size_t)max_alignment()) &&
      mapinfo->validate_classpath_entry_table()) {
    // Success (no need to do anything)
@ -990,6 +1014,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
    if (_rw_base != NULL) mapinfo->unmap_region(rw);
    if (_md_base != NULL) mapinfo->unmap_region(md);
    if (_mc_base != NULL) mapinfo->unmap_region(mc);
    mapinfo->unmap_string_regions();
#ifndef _WINDOWS
    // Release the entire mapped region
    shared_rs.release();
@ -1011,7 +1036,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
void MetaspaceShared::initialize_shared_spaces() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();

  char* buffer = mapinfo->region_base(md);
  char* buffer = mapinfo->header()->region_addr(md);

  // Skip over (reserve space for) a list of addresses of C++ vtables
  // for Klass objects.  They get filled in later.
@ -1027,13 +1052,16 @@ void MetaspaceShared::initialize_shared_spaces() {
  buffer += sizeof(intptr_t);
  buffer += vtable_size;

  // Create the shared symbol table using the bucket array at this spot in the
  // Create the shared symbol table using the compact table at this spot in the
  // misc data space.  (Todo: move this to read-only space. Currently
  // this is mapped copy-on-write but will never be written into).

  buffer = (char*)SymbolTable::init_shared_table(buffer);
  SymbolTable::create_table();

  // Create the shared string table using the compact table
  buffer = (char*)StringTable::init_shared_table(mapinfo, buffer);

  // Create the shared dictionary using the bucket array at this spot in
  // the misc data space.  Since the shared dictionary table is never
  // modified, this region (of mapped pages) will be (effectively, if
@ -1100,6 +1128,11 @@ void MetaspaceShared::initialize_shared_spaces() {
  }
}

void MetaspaceShared::fixup_shared_string_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_string_regions();
}

// JVM/TI RedefineClasses() support:
bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

@ -53,6 +53,7 @@ public:
    memset(this, 0, sizeof(*this));
  }
  CompactHashtableStats symbol;
  CompactHashtableStats string;
};

// Class Data Sharing Support
@ -90,7 +91,10 @@ class MetaspaceShared : AllStatic {
    rw = 1,  // read-write shared space in the heap
    md = 2,  // miscellaneous data for initializing tables, etc.
    mc = 3,  // miscellaneous code - vtable replacement.
    n_regions = 4
    max_strings = 2, // max number of string regions in string space
    num_non_strings = 4, // number of non-string regions
    first_string = num_non_strings, // index of first string region
    n_regions = max_strings + num_non_strings // total number of regions
  };
|
||||
|
||||
// Accessor functions to save shared space created for metadata, which has
|
||||
@ -124,10 +128,13 @@ class MetaspaceShared : AllStatic {
|
||||
}
|
||||
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
|
||||
static void initialize_shared_spaces() NOT_CDS_RETURN;
|
||||
static void fixup_shared_string_regions() NOT_CDS_RETURN;
|
||||
|
||||
// Return true if given address is in the mapped shared space.
|
||||
static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
|
||||
|
||||
static bool is_string_region(int idx) NOT_CDS_RETURN_(false);
|
||||
|
||||
static void generate_vtable_methods(void** vtbl_list,
|
||||
void** vtable,
|
||||
char** md_top, char* md_end,
|
||||
|
@ -311,6 +311,7 @@ void Universe::genesis(TRAPS) {
           SystemDictionary::Cloneable_klass(), "u3");
    assert(_the_array_interfaces_array->at(1) ==
           SystemDictionary::Serializable_klass(), "u3");
    MetaspaceShared::fixup_shared_string_regions();
  } else {
    // Set up shared interfaces array. (Do this before supers are set up.)
    _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());

@ -381,6 +381,9 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
    if (!constants()->is_shared()) {
      MetadataFactory::free_metadata(loader_data, constants());
    }
    // Delete any cached resolution errors for the constant pool
    SystemDictionary::delete_resolution_error(constants());

    set_constants(NULL);
  }
@ -665,6 +665,12 @@
  product(bool, UseMultiplyToLenIntrinsic, false,                   \
          "Enables intrinsification of BigInteger.multiplyToLen()") \
                                                                    \
  product(bool, UseSquareToLenIntrinsic, false,                     \
          "Enables intrinsification of BigInteger.squareToLen()")   \
                                                                    \
  product(bool, UseMulAddIntrinsic, false,                          \
          "Enables intrinsification of BigInteger.mulAdd()")        \
                                                                    \
  product(bool, UseTypeSpeculation, true,                           \
          "Speculatively propagate types from profiles")            \
                                                                    \
@ -972,7 +972,9 @@ void ConnectionGraph::process_call_arguments(CallNode *call) {
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0)
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0)
                  ))) {
        call->dump();
        fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
@ -817,19 +817,78 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
  BoolTest::mask hi_test = this_bool->_test._test;
  BoolTest::mask cond = hi_test;

  // convert:
  //
  //            dom_bool = x {<,<=,>,>=} a
  //                       /          \
  //   proj = {True,False}             otherproj = {False,True}
  //                     /
  //   this_bool = x {<,<=} b
  //              /         \
  //  fail = {True,False}    success = {False,True}
  //            /
  //
  // (Second test guaranteed canonicalized, first one may not have
  // been canonicalized yet)
  //
  // into:
  //
  //   cond = (x - lo) {<u,<=u,>u,>=u} adjusted_lim
  //              /     \
  //          fail       success
  //            /
  //

  // Figure out which of the two tests sets the upper bound and which
  // sets the lower bound if any.
  Node* adjusted_lim = NULL;
  if (hi_type->_lo > lo_type->_hi && hi_type->_hi == max_jint && lo_type->_lo == min_jint) {

    assert((dom_bool->_test.is_less() && !proj->_con) ||
           (dom_bool->_test.is_greater() && proj->_con), "incorrect test");
    // this test was canonicalized
    assert(this_bool->_test.is_less() && fail->_con, "incorrect test");

    // this_bool = <
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b[ on the fail (= True) projection, b > a-1 (because of hi_type->_lo > lo_type->_hi test above):
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b[ on the fail (= True) projection, b > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <u
    // this_bool = <=
    //   dom_bool = >= (proj = True) or dom_bool = < (proj = False)
    //     x in [a, b] on the fail (= True) projection, b+1 > a-1:
    //     lo = a, hi = b, adjusted_lim = b-a, cond = <=u
    //   dom_bool = > (proj = True) or dom_bool = <= (proj = False)
    //     x in ]a, b] on the fail (= True) projection b+1 > a:
    //     lo = a+1, hi = b, adjusted_lim = b-a, cond = <u
    //     lo = a+1, hi = b, adjusted_lim = b-a-1, cond = <=u doesn't work because a = b is possible, then hi-lo = -1

    if (lo_test == BoolTest::gt || lo_test == BoolTest::le) {
      if (hi_test == BoolTest::le) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        cond = BoolTest::lt;
      }
      lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
    }
  } else if (lo_type->_lo > hi_type->_hi && lo_type->_hi == max_jint && hi_type->_lo == min_jint) {

    // this_bool = <
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in [b, a[ on the fail (= False) projection, a > b-1 (because of lo_type->_lo > hi_type->_hi above):
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in [b, a] on the fail (= False) projection, a+1 > b-1:
    //     lo = b, hi = a, adjusted_lim = a-b, cond = >u
    // this_bool = <=
    //   dom_bool = < (proj = True) or dom_bool = >= (proj = False)
    //     x in ]b, a[ on the fail (= False) projection, a > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >=u
    //   dom_bool = <= (proj = True) or dom_bool = > (proj = False)
    //     x in ]b, a] on the fail (= False) projection, a+1 > b:
    //     lo = b+1, hi = a, adjusted_lim = a-b, cond = >=u
    //     lo = b+1, hi = a, adjusted_lim = a-b-1, cond = >u doesn't work because a = b is possible, then hi-lo = -1

    swap(lo, hi);
    swap(lo_type, hi_type);
    swap(lo_test, hi_test);
@ -842,6 +901,10 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
    cond = (hi_test == BoolTest::le || hi_test == BoolTest::gt) ? BoolTest::gt : BoolTest::ge;

    if (lo_test == BoolTest::le) {
      if (cond == BoolTest::gt) {
        adjusted_lim = igvn->transform(new SubINode(hi, lo));
        cond = BoolTest::ge;
      }
      lo = igvn->transform(new AddINode(lo, igvn->intcon(1)));
    }

@ -860,7 +923,6 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
      }
    }
  }

  lo = NULL;
  hi = NULL;
}
@ -868,12 +930,13 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
  if (lo && hi) {
    // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo))
    Node* adjusted_val = igvn->transform(new SubINode(n, lo));
    Node* adjusted_lim = igvn->transform(new SubINode(hi, lo));
    if (adjusted_lim == NULL) {
      adjusted_lim = igvn->transform(new SubINode(hi, lo));
    }
    Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim));
    Node* newbool = igvn->transform(new BoolNode(newcmp, cond));

    igvn->is_IterGVN()->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
    igvn->hash_delete(this);
    igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
    set_req(1, newbool);

    return true;
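The merged unsigned compare built above rests on a standard identity: for signed 32-bit ints with a <= b, the pair of signed tests a <= x && x < b agrees with the single unsigned test (x - a) <u (b - a), even when b - a overflows. A minimal, self-contained Java check of that identity (class and method names are illustrative, not part of this changeset):

import java.util.Random;

public class FoldCompareIdentity {
    // Two signed tests: a <= x < b.
    static boolean twoTests(int x, int a, int b) {
        return x >= a && x < b;
    }

    // One unsigned test: (x - a) <u (b - a), as in (CmpU (n - lo) (hi - lo)).
    static boolean oneUnsignedTest(int x, int a, int b) {
        return Integer.compareUnsigned(x - a, b - a) < 0;
    }

    public static void main(String[] args) {
        Random r = new Random(42);
        for (int i = 0; i < 1_000_000; i++) {
            int a = r.nextInt(), b = r.nextInt(), x = r.nextInt();
            if (a > b) { int t = a; a = b; b = t; }  // the identity needs a <= b
            if (twoTests(x, a, b) != oneUnsignedTest(x, a, b)) {
                throw new AssertionError(x + " " + a + " " + b);
            }
        }
        System.out.println("identity holds on 1M random triples");
    }
}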
@ -291,6 +291,8 @@ class LibraryCallKit : public GraphKit {
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
  bool inline_multiplyToLen();
  bool inline_squareToLen();
  bool inline_mulAdd();

  bool inline_profileBoolean();
  bool inline_isCompileConstant();
@ -494,6 +496,14 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
    if (!UseMultiplyToLenIntrinsic) return NULL;
    break;

  case vmIntrinsics::_squareToLen:
    if (!UseSquareToLenIntrinsic) return NULL;
    break;

  case vmIntrinsics::_mulAdd:
    if (!UseMulAddIntrinsic) return NULL;
    break;

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    if (!UseAESIntrinsics) return NULL;
@ -913,6 +923,12 @@ bool LibraryCallKit::try_to_inline(int predicate) {
  case vmIntrinsics::_multiplyToLen:
    return inline_multiplyToLen();

  case vmIntrinsics::_squareToLen:
    return inline_squareToLen();

  case vmIntrinsics::_mulAdd:
    return inline_mulAdd();

  case vmIntrinsics::_encodeISOArray:
    return inline_encodeISOArray();

@ -5306,6 +5322,100 @@ bool LibraryCallKit::inline_multiplyToLen() {
  return true;
}

//-------------inline_squareToLen------------------------------------
bool LibraryCallKit::inline_squareToLen() {
  assert(UseSquareToLenIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::squareToLen();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "squareToLen";

  assert(callee()->signature()->size() == 4, "implSquareToLen has 4 parameters");

  Node* x    = argument(0);
  Node* len  = argument(1);
  Node* z    = argument(2);
  Node* zlen = argument(3);

  const Type* x_type = x->Value(&_gvn);
  const Type* z_type = z->Value(&_gvn);
  const TypeAryPtr* top_x = x_type->isa_aryptr();
  const TypeAryPtr* top_z = z_type->isa_aryptr();
  if (top_x == NULL || top_x->klass() == NULL ||
      top_z == NULL || top_z->klass() == NULL) {
    // failed array check
    return false;
  }

  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType z_elem = z_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (x_elem != T_INT || z_elem != T_INT) {
    return false;
  }

  Node* x_start = array_element_address(x, intcon(0), x_elem);
  Node* z_start = array_element_address(z, intcon(0), z_elem);

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::squareToLen_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 x_start, len, z_start, zlen);

  set_result(z);
  return true;
}

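For orientation, the contract the squareToLen stub is expected to honor mirrors the private BigInteger helper it intrinsifies (named implSquareToLen/squareToLen depending on the JDK level; treat the exact name as an assumption): z, zlen == 2*len words long, receives the square of the len-word big-endian magnitude in x. A slow but obviously-correct Java sketch of that semantics, not the stub itself:

import java.math.BigInteger;

public class SquareToLenSemantics {
    // Square of the big-endian 32-bit-word magnitude in x; the stub must
    // produce exactly this value, stored into z as 2*len words.
    static BigInteger squareReference(int[] x, int len) {
        BigInteger v = BigInteger.ZERO;
        for (int i = 0; i < len; i++) {
            v = v.shiftLeft(32).or(BigInteger.valueOf(x[i] & 0xFFFFFFFFL));
        }
        return v.multiply(v);
    }

    public static void main(String[] args) {
        int[] x = { 0xCAFEBABE, 0x12345678 };
        System.out.println(squareReference(x, x.length).toString(16));
    }
}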
//-------------inline_mulAdd------------------------------------------
bool LibraryCallKit::inline_mulAdd() {
  assert(UseMulAddIntrinsic, "not implemented on this platform");

  address stubAddr = StubRoutines::mulAdd();
  if (stubAddr == NULL) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  const char* stubName = "mulAdd";

  assert(callee()->signature()->size() == 5, "mulAdd has 5 parameters");

  Node* out    = argument(0);
  Node* in     = argument(1);
  Node* offset = argument(2);
  Node* len    = argument(3);
  Node* k      = argument(4);

  const Type* out_type = out->Value(&_gvn);
  const Type* in_type  = in->Value(&_gvn);
  const TypeAryPtr* top_out = out_type->isa_aryptr();
  const TypeAryPtr* top_in  = in_type->isa_aryptr();
  if (top_out == NULL || top_out->klass() == NULL ||
      top_in  == NULL || top_in->klass()  == NULL) {
    // failed array check
    return false;
  }

  BasicType out_elem = out_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  BasicType in_elem  = in_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
  if (out_elem != T_INT || in_elem != T_INT) {
    return false;
  }

  Node* outlen     = load_array_length(out);
  Node* new_offset = _gvn.transform(new SubINode(outlen, offset));
  Node* out_start  = array_element_address(out, intcon(0), out_elem);
  Node* in_start   = array_element_address(in, intcon(0), in_elem);

  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
                                 OptoRuntime::mulAdd_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 out_start, in_start, new_offset, len, k);
  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}

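The new_offset = out.length - offset computed above is close kin to the index arithmetic on the Java side. As orientation only, a sketch of the arithmetic the mulAdd stub is assumed to perform, following the contract of BigInteger's private mulAdd(int[] out, int[] in, int offset, int len, int k): multiply the len-word magnitude in `in` by the unsigned word k, add it into `out` at the given distance from the end, and return the final carry.

public class MulAddSemantics {
    // out and in hold big-endian 32-bit words; k is treated as unsigned.
    static int mulAdd(int[] out, int[] in, int offset, int len, int k) {
        long kLong = k & 0xFFFFFFFFL;
        long carry = 0;
        offset = out.length - offset - 1;          // index of the last word touched
        for (int j = len - 1; j >= 0; j--) {
            long product = (in[j] & 0xFFFFFFFFL) * kLong
                         + (out[offset] & 0xFFFFFFFFL) + carry;
            out[offset--] = (int) product;
            carry = product >>> 32;
        }
        return (int) carry;                        // carry out of the top word
    }

    public static void main(String[] args) {
        int[] out = { 0, 0xFFFFFFFF, 0xFFFFFFFF };
        int[] in  = { 1 };
        int carry = mulAdd(out, in, 1, 1, 0xFFFFFFFF);  // adds 0xFFFFFFFF into out
        System.out.println(Integer.toHexString(carry) + " " + Integer.toHexString(out[1]));
    }
}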
/**
 * Calculate CRC32 for byte.
@ -475,7 +475,7 @@ void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have it's
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted. Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
@ -1911,10 +1911,13 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
    return;
  assert(opqzm->in(1) == main_limit, "do not understand situation");

  // Find the pre-loop limit; we will expand it's iterations to
  // Find the pre-loop limit; we will expand its iterations to
  // not ever trip low tests.
  Node *p_f = iffm->in(0);
  assert(p_f->Opcode() == Op_IfFalse, "");
  // pre loop may have been optimized out
  if (p_f->Opcode() != Op_IfFalse) {
    return;
  }
  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
  assert(pre_end->loopnode()->is_pre_loop(), "");
  Node *pre_opaq1 = pre_end->limit();
@ -2215,6 +2218,56 @@ void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
  }
}

#ifdef ASSERT
static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
  Node *ctrl = cl->in(LoopNode::EntryControl);
  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
  Node *iffm = ctrl->in(0);
  assert(iffm->Opcode() == Op_If, "");
  Node *p_f = iffm->in(0);
  assert(p_f->Opcode() == Op_IfFalse, "");
  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
  assert(pre_end->loopnode()->is_pre_loop(), "");
  return pre_end->loopnode();
}
#endif

// Remove the main and post loops and make the pre loop execute all
// iterations. Useful when the pre loop is found empty.
void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
  CountedLoopEndNode* pre_end = cl->loopexit();
  Node* pre_cmp = pre_end->cmp_node();
  if (pre_cmp->in(2)->Opcode() != Op_Opaque1) {
    // Only safe to remove the main loop if the compiler optimized it
    // out based on an unknown number of iterations
    return;
  }

  // Can we find the main loop?
  if (_next == NULL) {
    return;
  }

  Node* next_head = _next->_head;
  if (!next_head->is_CountedLoop()) {
    return;
  }

  CountedLoopNode* main_head = next_head->as_CountedLoop();
  if (!main_head->is_main_loop()) {
    return;
  }

  assert(locate_pre_from_main(main_head) == cl, "bad main loop");
  Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0);

  // Remove the Opaque1Node of the pre loop and make it execute all iterations
  phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
  // Remove the Opaque1Node of the main loop so it can be optimized out
  Node* main_cmp = main_iff->in(1)->in(1);
  assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?");
  phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
}

//------------------------------policy_do_remove_empty_loop--------------------
// Micro-benchmark spamming. Policy is to always remove empty loops.
@ -2233,6 +2286,12 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return false; // Infinite loop

  if (cl->is_pre_loop()) {
    // If the loop we are removing is a pre-loop then the main and
    // post loop can be removed as well
    remove_main_post_loops(cl, phase);
  }

#ifdef ASSERT
  // Ensure only one phi which is the iv.
  Node* iv = NULL;
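To make the new path concrete: policy_do_remove_empty_loop fires on counted loops with no remaining body, and with the change above an empty pre-loop now takes its main and post siblings with it. An illustrative Java shape (not from this changeset, and whether C2 has already split a given loop into a pre/main/post triple is an assumption about optimization order) that can reduce to exactly such removable empty loops:

public class EmptyLoopShape {
    // After optimization the loop below computes nothing observable, so the
    // counted loops C2 created for it are all candidates for removal.
    static void emptyLoop(int n) {
        for (int i = 0; i < n; i++) {
            // empty body
        }
    }

    public static void main(String[] args) {
        for (int j = 0; j < 20_000; j++) {  // warm up so C2 compiles it
            emptyLoop(1000);
        }
    }
}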
@ -485,6 +485,8 @@ public:
  bool is_inner() { return is_loop() && _child == NULL; }
  bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }

  void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase);

#ifndef PRODUCT
  void dump_head( ) const; // Dump loop head only
  void dump() const;       // Dump this loop recursively
@ -945,6 +945,48 @@ const TypeFunc* OptoRuntime::multiplyToLen_Type() {
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// for mulAdd calls, 2 pointers and 3 ints, returning int
const TypeFunc* OptoRuntime::mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // out
  fields[argp++] = TypePtr::NOTNULL;    // in
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeInt::INT;        // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state access for on stack replacement
@ -312,6 +312,10 @@ private:

  static const TypeFunc* multiplyToLen_Type();

  static const TypeFunc* squareToLen_Type();

  static const TypeFunc* mulAdd_Type();

  static const TypeFunc* updateBytesCRC32_Type();

  // leaf on stack replacement interpreter accessor types
@ -4089,9 +4089,6 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
    mnt->adjust_method_entries(the_class(), &trace_name_printed);
  }

  // Fix Resolution Error table also to remove old constant pools
  SystemDictionary::delete_resolution_error(old_constants);

  if (the_class->oop_map_cache() != NULL) {
    // Flush references to any obsolete methods from the oop map cache
    // so that obsolete methods are not pinned.
@ -31,6 +31,7 @@
#include "code/codeCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
@ -1207,6 +1208,11 @@ WB_ENTRY(jobject, WB_GetMethodStringOption(JNIEnv* env, jobject wb, jobject meth
  return NULL;
WB_END

WB_ENTRY(jboolean, WB_IsShared(JNIEnv* env, jobject wb, jobject obj))
  oop obj_oop = JNIHandles::resolve(obj);
  return MetaspaceShared::is_in_shared_space((void*)obj_oop);
WB_END

// Some convenience methods to deal with objects from java
int WhiteBox::offset_for_field(const char* field_name, oop object,
    Symbol* signature_symbol) {
@ -1431,6 +1437,7 @@ static JNINativeMethod methods[] = {
  {CC"getMethodStringOption",
      CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/String;",
                                                      (void*)&WB_GetMethodStringOption},
  {CC"isShared", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsShared },
};

#undef CC
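A hedged usage sketch for the new hook: assuming the matching Java-side declaration public native boolean isShared(Object o) in sun.hotspot.WhiteBox (the registration above binds that signature to WB_IsShared; the Java counterpart is not shown in this diff), a test run with -Xbootclasspath/a:wb.jar -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI could probe a CDS-mapped object like this:

import sun.hotspot.WhiteBox;

public class IsSharedProbe {
    public static void main(String[] args) {
        WhiteBox wb = WhiteBox.getWhiteBox();
        // True only if the interned string was mapped from the shared string region.
        String s = "java.lang.Object".intern();
        System.out.println("shared: " + wb.isShared(s));
    }
}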
@ -64,7 +64,7 @@ void AdvancedThresholdPolicy::initialize() {
  }
#endif

#ifdef SPARC
#if defined SPARC || defined AARCH64
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
@ -670,8 +670,8 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
  }
  // always move the block
  void* ptr = os::malloc(size, memflags, stack);
  if (PrintMalloc) {
    tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
  if (PrintMalloc && tty != NULL) {
    tty->print_cr("os::realloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
  }
  // Copy to new memory if malloc didn't fail
  if ( ptr != NULL ) {
@ -539,7 +539,8 @@ class os: AllStatic {
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);
                                           int buflen, int* offset,
                                           bool demangle = true);

  // Locate DLL/DSO. On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
@ -96,7 +96,7 @@ StubCodeGenerator::~StubCodeGenerator() {
    toprint[toprint_len++] = cdesc;
    if (cdesc == _first_stub) { saw_first = true; break; }
  }
  assert(saw_first, "must get both first & last");
  assert(toprint_len == 0 || saw_first, "must get both first & last");
  // Print in reverse order:
  qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
  for (int i = 0; i < toprint_len; i++) {
@ -137,6 +137,8 @@ address StubRoutines::_updateBytesCRC32 = NULL;
address StubRoutines::_crc_table_adr = NULL;

address StubRoutines::_multiplyToLen = NULL;
address StubRoutines::_squareToLen = NULL;
address StubRoutines::_mulAdd = NULL;

double (* StubRoutines::_intrinsic_log   )(double) = NULL;
double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
@ -197,6 +197,8 @@ class StubRoutines: AllStatic {
  static address _crc_table_adr;

  static address _multiplyToLen;
  static address _squareToLen;
  static address _mulAdd;

  // These are versions of the java.lang.Math methods which perform
  // the same operations as the intrinsic version. They are used for
@ -356,6 +358,8 @@ class StubRoutines: AllStatic {
  static address crc_table_addr()   { return _crc_table_adr; }

  static address multiplyToLen()    { return _multiplyToLen; }
  static address squareToLen()      { return _squareToLen; }
  static address mulAdd()           { return _mulAdd; }

  static address select_fill_function(BasicType t, bool aligned, const char* &name);
@ -678,7 +678,7 @@ void NMethodSweeper::possibly_flush(nmethod* nm) {
    // ReservedCodeCacheSize
    int reset_val = hotness_counter_reset_val();
    int time_since_reset = reset_val - nm->hotness_counter();
    int code_blob_type = (CodeCache::get_code_blob_type(nm->comp_level()));
    int code_blob_type = CodeCache::get_code_blob_type(nm);
    double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
    // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
    // I.e., 'threshold' increases with lower available space in the code cache and a higher
@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
@ -638,11 +639,11 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() ||
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() ||
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
@ -831,6 +831,8 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
  static_field(StubRoutines, _updateBytesCRC32, address) \
  static_field(StubRoutines, _crc_table_adr,    address) \
  static_field(StubRoutines, _multiplyToLen,    address) \
  static_field(StubRoutines, _squareToLen,      address) \
  static_field(StubRoutines, _mulAdd,           address) \
                                                         \
  /*****************/                                    \
  /* SharedRuntime */                                    \
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -347,6 +347,13 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
      return true;
    }

    // Mapped CDS string region.
    // The string region(s) is part of the java heap.
    if (reserved_rgn->flag() == mtJavaHeap) {
      assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
      return true;
    }

    ShouldNotReachHere();
    return false;
  }
@ -264,6 +264,15 @@ Declares a structure type that can be used in other events.
      <structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
    </event>

    <event id="G1HeapSummary" path="vm/gc/heap/g1_summary" label="G1 Heap Summary" is_instant="true">
      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
      <value type="GCWHEN" field="when" label="When" />

      <value type="BYTES64" field="edenUsedSize" label="Eden Used Size" />
      <value type="BYTES64" field="edenTotalSize" label="Eden Total Size" />
      <value type="BYTES64" field="survivorUsedSize" label="Survivor Used Size" />
    </event>

    <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
           description="Garbage collection performed by the JVM">
      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID" />
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -102,7 +102,7 @@ Mutex* Decoder::shared_decoder_lock() {
  return _shared_decoder_lock;
}

bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
  assert(_shared_decoder_lock != NULL, "Just check");
  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
@ -110,7 +110,7 @@ bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const cha
                                get_error_handler_instance(): get_shared_instance();
  assert(decoder != NULL, "null decoder");

  return decoder->decode(addr, buf, buflen, offset, modulepath);
  return decoder->decode(addr, buf, buflen, offset, modulepath, demangle);
}

bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) {
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -46,8 +46,12 @@ public:

  // decode a pc address to the corresponding function name and an offset from the beginning of
  // the function
  //
  // Note: the 'base' variant does not demangle names. The
  // demangling that was done systematically in the 'modulepath' variant
  // is now optional.
  virtual bool decode(address pc, char* buf, int buflen, int* offset,
                      const char* modulepath = NULL) = 0;
                      const char* modulepath = NULL, bool demangle = true) = 0;
  virtual bool decode(address pc, char* buf, int buflen, int* offset, const void* base) = 0;

  // demangle a C++ symbol
@ -81,7 +85,7 @@ public:
  ~NullDecoder() {};

  virtual bool decode(address pc, char* buf, int buflen, int* offset,
                      const char* modulepath = NULL) {
                      const char* modulepath, bool demangle) {
    return false;
  }

@ -101,7 +105,10 @@ public:

class Decoder : AllStatic {
public:
  static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL);
  static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL, bool demangle = true);
  static bool decode(address pc, char* buf, int buflen, int* offset, bool demangle) {
    return decode(pc, buf, buflen, offset, (const char*) NULL, demangle);
  }
  static bool decode(address pc, char* buf, int buflen, int* offset, const void* base);
  static bool demangle(const char* symbol, char* buf, int buflen);
  static bool can_decode_C_frame_in_vm();
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ ElfDecoder::~ElfDecoder() {
  }
}

bool ElfDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* filepath) {
bool ElfDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* filepath, bool demangle_name) {
  assert(filepath, "null file path");
  assert(buf != NULL && buflen > 0, "Invalid buffer");
  if (has_error()) return false;
@ -46,7 +46,7 @@ bool ElfDecoder::decode(address addr, char *buf, int buflen, int* offset, const
  if (!file->decode(addr, buf, buflen, offset)) {
    return false;
  }
  if (buf[0] != '\0') {
  if (demangle_name && (buf[0] != '\0')) {
    demangle(buf, buf, buflen);
  }
  return true;
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ public:
  bool can_decode_C_frame_in_vm() const { return true; }

  bool demangle(const char* symbol, char *buf, int buflen);
  bool decode(address addr, char *buf, int buflen, int* offset, const char* filepath = NULL);
  bool decode(address addr, char *buf, int buflen, int* offset, const char* filepath, bool demangle);
  bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
    ShouldNotReachHere();
    return false;
hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java (new file, 117 lines)
@ -0,0 +1,117 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 8081778
 * @summary Add C2 x86 intrinsic for BigInteger::mulAdd() method
 *
 * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch
 *      -XX:+IgnoreUnrecognizedVMOptions -XX:-UseSquareToLenIntrinsic -XX:-UseMultiplyToLenIntrinsic
 *      -XX:CompileCommand=dontinline,TestMulAdd::main
 *      -XX:CompileCommand=option,TestMulAdd::base_multiply,ccstr,DisableIntrinsic,_mulAdd
 *      -XX:CompileCommand=option,java.math.BigInteger::multiply,ccstr,DisableIntrinsic,_mulAdd
 *      -XX:CompileCommand=option,java.math.BigInteger::square,ccstr,DisableIntrinsic,_mulAdd
 *      -XX:CompileCommand=option,java.math.BigInteger::squareToLen,ccstr,DisableIntrinsic,_mulAdd
 *      -XX:CompileCommand=option,java.math.BigInteger::mulAdd,ccstr,DisableIntrinsic,_mulAdd
 *      -XX:CompileCommand=inline,java.math.BigInteger::multiply
 *      -XX:CompileCommand=inline,java.math.BigInteger::square
 *      -XX:CompileCommand=inline,java.math.BigInteger::squareToLen
 *      -XX:CompileCommand=inline,java.math.BigInteger::mulAdd TestMulAdd
 */

import java.util.Random;
import java.math.*;

public class TestMulAdd {

    // Avoid intrinsic by preventing inlining multiply() and mulAdd().
    public static BigInteger base_multiply(BigInteger op1) {
        return op1.multiply(op1);
    }

    // Generate mulAdd() intrinsic by inlining multiply().
    public static BigInteger new_multiply(BigInteger op1) {
        return op1.multiply(op1);
    }

    public static boolean bytecompare(BigInteger b1, BigInteger b2) {
        byte[] data1 = b1.toByteArray();
        byte[] data2 = b2.toByteArray();
        if (data1.length != data2.length)
            return false;
        for (int i = 0; i < data1.length; i++) {
            if (data1[i] != data2[i])
                return false;
        }
        return true;
    }

    public static String stringify(BigInteger b) {
        String strout = "";
        byte[] data = b.toByteArray();
        for (int i = 0; i < data.length; i++) {
            strout += (String.format("%02x", data[i]) + " ");
        }
        return strout;
    }

    public static void main(String args[]) throws Exception {

        BigInteger oldsum = new BigInteger("0");
        BigInteger newsum = new BigInteger("0");

        BigInteger b1, b2, oldres, newres;

        Random rand = new Random();
        long seed = System.nanoTime();
        Random rand1 = new Random();
        long seed1 = System.nanoTime();
        rand.setSeed(seed);
        rand1.setSeed(seed1);

        for (int j = 0; j < 100000; j++) {
            int rand_int = rand1.nextInt(3136) + 32;
            b1 = new BigInteger(rand_int, rand);

            oldres = base_multiply(b1);
            newres = new_multiply(b1);

            oldsum = oldsum.add(oldres);
            newsum = newsum.add(newres);

            if (!bytecompare(oldres, newres)) {
                System.out.print("mismatch for:b1:" + stringify(b1) + " :oldres:" + stringify(oldres) + " :newres:" + stringify(newres));
                System.out.println(b1);
                throw new Exception("Failed");
            }
        }
        if (!bytecompare(oldsum, newsum)) {
            System.out.println("Failure: oldsum:" + stringify(oldsum) + " newsum:" + stringify(newsum));
            throw new Exception("Failed");
        } else {
            System.out.println("Success");
        }
    }
}
@ -0,0 +1,114 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 8081778
 * @summary Add C2 x86 intrinsic for BigInteger::squareToLen() method
 *
 * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch
 *      -XX:CompileCommand=exclude,TestSquareToLen::main
 *      -XX:CompileCommand=option,TestSquareToLen::base_multiply,ccstr,DisableIntrinsic,_squareToLen
 *      -XX:CompileCommand=option,java.math.BigInteger::multiply,ccstr,DisableIntrinsic,_squareToLen
 *      -XX:CompileCommand=option,java.math.BigInteger::square,ccstr,DisableIntrinsic,_squareToLen
 *      -XX:CompileCommand=option,java.math.BigInteger::squareToLen,ccstr,DisableIntrinsic,_squareToLen
 *      -XX:CompileCommand=inline,java.math.BigInteger::multiply
 *      -XX:CompileCommand=inline,java.math.BigInteger::square
 *      -XX:CompileCommand=inline,java.math.BigInteger::squareToLen TestSquareToLen
 */

import java.util.Random;
import java.math.*;

public class TestSquareToLen {

    // Avoid intrinsic by preventing inlining multiply() and squareToLen().
    public static BigInteger base_multiply(BigInteger op1) {
        return op1.multiply(op1);
    }

    // Generate squareToLen() intrinsic by inlining multiply().
    public static BigInteger new_multiply(BigInteger op1) {
        return op1.multiply(op1);
    }

    public static boolean bytecompare(BigInteger b1, BigInteger b2) {
        byte[] data1 = b1.toByteArray();
        byte[] data2 = b2.toByteArray();
        if (data1.length != data2.length)
            return false;
        for (int i = 0; i < data1.length; i++) {
            if (data1[i] != data2[i])
                return false;
        }
        return true;
    }

    public static String stringify(BigInteger b) {
        String strout = "";
        byte[] data = b.toByteArray();
        for (int i = 0; i < data.length; i++) {
            strout += (String.format("%02x", data[i]) + " ");
        }
        return strout;
    }

    public static void main(String args[]) throws Exception {

        BigInteger oldsum = new BigInteger("0");
        BigInteger newsum = new BigInteger("0");

        BigInteger b1, b2, oldres, newres;

        Random rand = new Random();
        long seed = System.nanoTime();
        Random rand1 = new Random();
        long seed1 = System.nanoTime();
        rand.setSeed(seed);
        rand1.setSeed(seed1);

        for (int j = 0; j < 100000; j++) {
            int rand_int = rand1.nextInt(3136) + 32;
            b1 = new BigInteger(rand_int, rand);

            oldres = base_multiply(b1);
            newres = new_multiply(b1);

            oldsum = oldsum.add(oldres);
            newsum = newsum.add(newres);

            if (!bytecompare(oldres, newres)) {
                System.out.print("mismatch for:b1:" + stringify(b1) + " :oldres:" + stringify(oldres) + " :newres:" + stringify(newres));
                System.out.println(b1);
                throw new Exception("Failed");
            }
        }
        if (!bytecompare(oldsum, newsum)) {
            System.out.println("Failure: oldsum:" + stringify(oldsum) + " newsum:" + stringify(newsum));
            throw new Exception("Failed");
        } else {
            System.out.println("Success");
        }
    }
}
hotspot/test/compiler/rangechecks/TestBadFoldCompare.java (new file, 94 lines)
@ -0,0 +1,94 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8085832
 * @summary x <= 0 || x > 0 wrongly folded as (x-1) >u -1
 * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestBadFoldCompare
 */

public class TestBadFoldCompare {

    static boolean test1_taken;

    static void helper1(int i, int a, int b, boolean flag) {
        if (flag) {
            if (i <= a || i > b) {
                test1_taken = true;
            }
        }
    }

    static void test1(int i, boolean flag) {
        helper1(i, 0, 0, flag);
    }

    static boolean test2_taken;

    static void helper2(int i, int a, int b, boolean flag) {
        if (flag) {
            if (i > b || i <= a) {
                test2_taken = true;
            }
        }
    }

    static void test2(int i, boolean flag) {
        helper2(i, 0, 0, flag);
    }

    static public void main(String[] args) {
        boolean success = true;

        for (int i = 0; i < 20000; i++) {
            helper1(5, 0, 10, (i%2)==0);
            helper1(-1, 0, 10, (i%2)==0);
            helper1(15, 0, 10, (i%2)==0);
            test1(0, false);
        }
        test1_taken = false;
        test1(0, true);
        if (!test1_taken) {
            System.out.println("Test1 failed");
            success = false;
        }

        for (int i = 0; i < 20000; i++) {
            helper2(5, 0, 10, (i%2)==0);
            helper2(-1, 0, 10, (i%2)==0);
            helper2(15, 0, 10, (i%2)==0);
            test2(0, false);
        }
        test2_taken = false;
        test2(0, true);

        if (!test2_taken) {
            System.out.println("Test2 failed");
            success = false;
        }
        if (!success) {
            throw new RuntimeException("Some tests failed");
        }
    }
}
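Why the fold named in the @summary is wrong, in two lines of arithmetic: unsigned -1 is the largest 32-bit value, so (x-1) >u -1 can never be true, while x <= 0 || x > 0 is a tautology. A standalone Java illustration (hypothetical helper class, not part of the test above):

public class BadFoldIllustration {
    public static void main(String[] args) {
        int x = 5;
        boolean original = (x <= 0 || x > 0);                       // always true
        boolean badFold = Integer.compareUnsigned(x - 1, -1) > 0;   // never true
        System.out.println(original + " vs " + badFold);            // prints: true vs false
    }
}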
@ -0,0 +1,143 @@
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8076110
 * @summary Redefine running methods that have cached resolution errors
 * @library /testlibrary
 * @modules java.instrument
 *          java.base/jdk.internal.org.objectweb.asm
 * @build RedefineClassHelper
 * @run main RedefineClassHelper
 * @run main/othervm -javaagent:redefineagent.jar -XX:TraceRedefineClasses=0x600 RedefineRunningMethodsWithResolutionErrors
 */

import jdk.internal.org.objectweb.asm.ClassWriter;
import jdk.internal.org.objectweb.asm.Label;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.org.objectweb.asm.Opcodes;

import java.lang.reflect.InvocationTargetException;

public class RedefineRunningMethodsWithResolutionErrors extends ClassLoader implements Opcodes {

    @Override
    protected Class<?> findClass(String name) throws ClassNotFoundException {
        if (name.equals("C")) {
            byte[] b = loadC(false);
            return defineClass(name, b, 0, b.length);
        } else {
            return super.findClass(name);
        }
    }

    private static byte[] loadC(boolean redefine) {
        ClassWriter cw = new ClassWriter(0);

        cw.visit(52, ACC_SUPER | ACC_PUBLIC, "C", null, "java/lang/Object", null);
        {
            MethodVisitor mv;

            mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "m", "()V", null, null);
            mv.visitCode();

            // First time we run we will:
            // 1) Cache resolution errors
            // 2) Redefine the class / method
            // 3) Try to read the resolution errors that were cached
            //
            // The redefined method will never run, throw error to be sure
            if (redefine) {
                createThrowRuntimeExceptionCode(mv, "The redefined method was called");
            } else {
                createMethodBody(mv);
            }
            mv.visitMaxs(3, 0);
            mv.visitEnd();
        }
        cw.visitEnd();
        return cw.toByteArray();
    }

    private static void createMethodBody(MethodVisitor mv) {
        Label classExists = new Label();

        // Cache resolution errors
        createLoadNonExistentClassCode(mv, classExists);

        // Redefine our own class and method
        mv.visitMethodInsn(INVOKESTATIC, "RedefineRunningMethodsWithResolutionErrors", "redefine", "()V");

        // Provoke the same error again to make sure the resolution error cache works
        createLoadNonExistentClassCode(mv, classExists);

        // Test passed
        mv.visitInsn(RETURN);

        mv.visitFrame(F_SAME, 0, new Object[0], 0, new Object[0]);
        mv.visitLabel(classExists);

        createThrowRuntimeExceptionCode(mv, "Loaded class that shouldn't exist (\"NonExistentClass\")");
    }

    private static void createLoadNonExistentClassCode(MethodVisitor mv, Label classExists) {
        Label tryLoadBegin = new Label();
        Label tryLoadEnd = new Label();
        Label catchLoadBlock = new Label();
        mv.visitTryCatchBlock(tryLoadBegin, tryLoadEnd, catchLoadBlock, "java/lang/NoClassDefFoundError");

        // Try to load a class that does not exist to provoke resolution errors
        mv.visitLabel(tryLoadBegin);
        mv.visitMethodInsn(INVOKESTATIC, "NonExistentClass", "nonExistentMethod", "()V");
        mv.visitLabel(tryLoadEnd);

        // No NoClassDefFoundError means NonExistentClass existed, which shouldn't happen
        mv.visitJumpInsn(GOTO, classExists);

        mv.visitFrame(F_SAME1, 0, new Object[0], 1, new Object[] { "java/lang/NoClassDefFoundError" });
        mv.visitLabel(catchLoadBlock);

        // Ignore the expected NoClassDefFoundError
        mv.visitInsn(POP);
    }

    private static void createThrowRuntimeExceptionCode(MethodVisitor mv, String msg) {
        mv.visitTypeInsn(NEW, "java/lang/RuntimeException");
        mv.visitInsn(DUP);
        mv.visitLdcInsn(msg);
        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/RuntimeException", "<init>", "(Ljava/lang/String;)V");
        mv.visitInsn(ATHROW);
    }

    private static Class<?> c;

    public static void redefine() throws Exception {
        RedefineClassHelper.redefineClass(c, loadC(true));
    }

    public static void main(String[] args) throws ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
        c = Class.forName("C", true, new RedefineRunningMethodsWithResolutionErrors());
        c.getMethod("m").invoke(null);
    }
}
@ -21,9 +21,13 @@
 * questions.
 */

import java.util.ArrayList;
import java.util.List;

import jdk.test.lib.Platform;
import jdk.test.lib.ProcessTools;
import jdk.test.lib.OutputAnalyzer;
import jdk.test.lib.Utils;
import jdk.test.lib.apps.LingeredApp;

/*
@ -44,7 +48,10 @@ public class TestClassLoaderStats {

        LingeredApp app = null;
        try {
            app = LingeredApp.startApp();
            List<String> vmArgs = new ArrayList<String>();
            vmArgs.add("-XX:+UsePerfData");
            vmArgs.addAll(Utils.getVmOptions());
            app = LingeredApp.startApp(vmArgs);

            System.out.println("Attaching sun.jvm.hotspot.tools.ClassLoaderStats to " + app.getPid());
            ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user