Merge
commit 6a3bcf0a75
@ -309,3 +309,4 @@ ea38728b4f4bdd8fd0d7a89b18069f521cf05013 jdk9-b61
82cf9aab9a83e41c8194ba01af9666afdb856cbe jdk9-b64
7c31f9d7b932f7924f1258d52885b1c7c3e078c2 jdk9-b65
dc6e8336f51bb6b67b7245766179eab5ca7720b4 jdk9-b66
f546760134eb861fcfecd4ce611b0040b0d25a6a jdk9-b67
@ -338,14 +338,16 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# no adjustment
;;
slowdebug )
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
# Add runtime stack smashing and undefined behavior checks.
# Not all versions of gcc support -fstack-protector
STACK_PROTECTOR_CFLAG="-fstack-protector-all"
FLAGS_COMPILER_CHECK_ARGUMENTS([$STACK_PROTECTOR_CFLAG], [], [STACK_PROTECTOR_CFLAG=""])

CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
;;
esac
fi
AC_SUBST(CFLAGS_DEBUG_OPTIONS)
AC_SUBST(CXXFLAGS_DEBUG_OPTIONS)

# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
@ -718,8 +718,6 @@ C_O_FLAG_DEBUG
C_O_FLAG_NORM
C_O_FLAG_HI
C_O_FLAG_HIGHEST
CXXFLAGS_DEBUG_OPTIONS
CFLAGS_DEBUG_OPTIONS
CXXFLAGS_DEBUG_SYMBOLS
CFLAGS_DEBUG_SYMBOLS
CXX_FLAG_DEPS
@ -4366,7 +4364,7 @@ VS_SDK_PLATFORM_NAME_2013=
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1432629750
DATE_WHEN_GENERATED=1433337614

###############################################################################
#
@ -41837,14 +41835,80 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
# no adjustment
;;
slowdebug )
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
;;
esac
# Add runtime stack smashing and undefined behavior checks.
# Not all versions of gcc support -fstack-protector
STACK_PROTECTOR_CFLAG="-fstack-protector-all"

{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"" >&5
$as_echo_n "checking if compiler supports \"$STACK_PROTECTOR_CFLAG\"... " >&6; }
supports=yes

saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS $STACK_PROTECTOR_CFLAG"
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int i;
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :

else
supports=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

CFLAGS="$saved_cflags"

saved_cxxflags="$CXXFLAGS"
CXXFLAGS="$CXXFLAG $STACK_PROTECTOR_CFLAG"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int i;
_ACEOF
if ac_fn_cxx_try_compile "$LINENO"; then :

else
supports=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

CXXFLAGS="$saved_cxxflags"

{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5
$as_echo "$supports" >&6; }
if test "x$supports" = "xyes" ; then
:
else
STACK_PROTECTOR_CFLAG=""
fi

CFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="$STACK_PROTECTOR_CFLAG --param ssp-buffer-size=1"
;;
esac
fi

# Optimization levels
if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
@ -309,3 +309,4 @@ d27f7e0a7aca129969de23e9934408a31b4abf4c jdk9-b62
0a5e5a7c3539e8bde73d9fe55750e49a49cb8dac jdk9-b64
afc1e295c4bf83f9a5dd539c29914edd4a754a3f jdk9-b65
44ee68f7dbacab24a45115fd6a8ccdc7eb6e8f0b jdk9-b66
4418697e56f1f43597f55c7cb6573549c6117868 jdk9-b67
@ -469,3 +469,4 @@ ee878f3d6732856f7725c590312bfbe2ffa52cc7 jdk9-b58
bf92b8db249cdfa5651ef954b6c0743a7e0ea4cd jdk9-b64
e7ae94c4f35e940ea423fc1dd260435df34a77c0 jdk9-b65
197e94e0dacddd16816f101d24fc0442ab518326 jdk9-b66
d47dfabd16d48eb96a451edd1b61194a39ee0eb5 jdk9-b67
File diff suppressed because it is too large
@ -466,6 +466,11 @@ class Address VALUE_OBJ_CLASS_SPEC {
case base_plus_offset:
{
unsigned size = i->get(31, 30);
if (i->get(26, 26) && i->get(23, 23)) {
// SIMD Q Type - Size = 128 bits
assert(size == 0, "bad size");
size = 0b100;
}
unsigned mask = (1 << size) - 1;
if (_offset < 0 || _offset & mask)
{
@ -1888,9 +1893,18 @@ public:
};

enum SIMD_RegVariant {
S32, D64, Q128
B, H, S, D, Q
};

#define INSN(NAME, op) \
void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) { \
ld_st2((Register)Rt, adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1); \
} \

INSN(ldr, 1);
INSN(str, 0);

#undef INSN

private:
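As a rough, standalone illustration (not part of the patch) of how the new B/H/S/D/Q register variants feed the ldr/str forms above: the macro passes (int)T & 3 as the size field and bumps the opc by 0b10 for Q. The enum ordering B=0 ... Q=4 is taken from the hunk; everything else below is hypothetical scaffolding.

// Sketch only -- mirrors "(int)T & 3" and "op + ((T==Q) ? 0b10 : 0b00)" from the hunk above.
#include <cstdio>

enum SIMD_RegVariant { B, H, S, D, Q };

// Compute the size (bits 31:30) and opc fields for a SIMD&FP load (op==1) or store (op==0).
static void ldst_fields(SIMD_RegVariant T, int op, unsigned* size, unsigned* opc) {
  *size = (unsigned)T & 3;                  // B->00, H->01, S->10, D->11, Q->00
  *opc  = op + ((T == Q) ? 0b10 : 0b00);    // Q selects the opc=1x encodings
}

int main() {
  static const char* names[] = { "B", "H", "S", "D", "Q" };
  for (int t = B; t <= Q; t++) {
    unsigned size, opc;
    ldst_fields((SIMD_RegVariant)t, /*op=*/1, &size, &opc);   // ldr case
    std::printf("ldr %s: size=%u opc=%u\n", names[t], size, opc);
  }
  return 0;
}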
@ -1997,27 +2011,87 @@ public:
rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0); \
}

INSN(eor, 0b101110001);
INSN(orr, 0b001110101);
INSN(eor, 0b101110001);
INSN(orr, 0b001110101);
INSN(andr, 0b001110001);
INSN(bic, 0b001110011);
INSN(bif, 0b101110111);
INSN(bit, 0b101110101);
INSN(bsl, 0b101110011);
INSN(orn, 0b001110111);
INSN(bic, 0b001110011);
INSN(bif, 0b101110111);
INSN(bit, 0b101110101);
INSN(bsl, 0b101110011);
INSN(orn, 0b001110111);

#undef INSN

#define INSN(NAME, opc) \
#define INSN(NAME, opc, opc2) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
starti; \
f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(0b100001, 15, 10); \
f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10); \
rf(Vn, 5), rf(Vd, 0); \
}

INSN(addv, 0);
INSN(subv, 1);
INSN(addv, 0, 0b100001);
INSN(subv, 1, 0b100001);
INSN(mulv, 0, 0b100111);
INSN(sshl, 0, 0b010001);
INSN(ushl, 1, 0b010001);

#undef INSN

#define INSN(NAME, opc, opc2) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
starti; \
f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
f((int)T >> 1, 23, 22), f(opc2, 21, 10); \
rf(Vn, 5), rf(Vd, 0); \
}

INSN(absr, 0, 0b100000101110);
INSN(negr, 1, 0b100000101110);
INSN(notr, 1, 0b100000010110);
INSN(addv, 0, 0b110001101110);

#undef INSN

#define INSN(NAME, op0, cmode0) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) { \
unsigned cmode = cmode0; \
unsigned op = op0; \
starti; \
assert(lsl == 0 || \
((T == T4H || T == T8H) && lsl == 8) || \
((T == T2S || T == T4S) && ((lsl >> 3) < 4)), "invalid shift"); \
cmode |= lsl >> 2; \
if (T == T4H || T == T8H) cmode |= 0b1000; \
if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \
assert(op == 0 && cmode0 == 0, "must be MOVI"); \
cmode = 0b1110; \
if (T == T1D || T == T2D) op = 1; \
} \
f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19); \
f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \
rf(Vd, 0); \
}

INSN(movi, 0, 0);
INSN(orri, 0, 1);
INSN(mvni, 1, 0);
INSN(bici, 1, 1);

#undef INSN

#define INSN(NAME, op1, op2, op3) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
starti; \
assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \
f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23); \
f(T==T2D ? 1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0); \
}

INSN(fadd, 0, 0, 0b110101);
INSN(fdiv, 1, 0, 0b111111);
INSN(fmul, 1, 0, 0b110111);
INSN(fsub, 0, 1, 0b110101);

#undef INSN
@ -2064,19 +2138,40 @@ public:

#undef INSN

void shl(FloatRegister Vd, FloatRegister Vn, SIMD_Arrangement T, int shift){
void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) {
starti;
/* The encodings for the immh:immb fields (bits 22:16) are
* 0001 xxx 8B/16B, shift = xxx
* 001x xxx 4H/8H, shift = xxxx
* 01xx xxx 2S/4S, shift = xxxxx
* 1xxx xxx 1D/2D, shift = xxxxxx (1D is RESERVED)
*/
assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value");
f(0, 31), f(T & 1, 30), f(0b0011110, 29, 23), f((1 << ((T>>1)+3))|shift, 22, 16);
f(0b010101, 15, 10), rf(Vn, 5), rf(Vd, 0);
assert(T != Q, "invalid register variant");
f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15);
f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
}

void umov(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) {
starti;
f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21);
f(((idx<<1)|1)<<(int)T, 20, 16), f(0b001111, 15, 10);
rf(Vn, 5), rf(Rd, 0);
}

#define INSN(NAME, opc, opc2) \
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \
starti; \
/* The encodings for the immh:immb fields (bits 22:16) are \
* 0001 xxx 8B/16B, shift = xxx \
* 001x xxx 4H/8H, shift = xxxx \
* 01xx xxx 2S/4S, shift = xxxxx \
* 1xxx xxx 1D/2D, shift = xxxxxx (1D is RESERVED) \
*/ \
assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \
f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23), \
f((1 << ((T>>1)+3))|shift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \
}

INSN(shl, 0, 0b010101);
INSN(sshr, 0, 0b000001);
INSN(ushr, 1, 0b000001);

#undef INSN
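A minimal standalone sketch (not from the patch) of the immh:immb packing described in the comment above, assuming the usual SIMD_Arrangement ordering T8B=0, T16B=1, T4H=2, T8H=3, T2S=4, T4S=5, T1D=6, T2D=7: the leading one bit marks the element size and the bits below it carry the shift amount, which is exactly what f((1 << ((T>>1)+3))|shift, 22, 16) writes.

// Illustration only of the 7-bit immh:immb value placed into bits 22:16.
#include <cassert>
#include <cstdio>

enum SIMD_Arrangement { T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D };  // assumed ordering

static unsigned immh_immb(SIMD_Arrangement T, int shift) {
  unsigned esize_bit = 1u << ((T >> 1) + 3);   // 8B/16B -> 0b0001000, 4H/8H -> 0b0010000, ...
  assert((unsigned)shift < esize_bit && "shift must fit below the element-size marker");
  return esize_bit | (unsigned)shift;
}

int main() {
  std::printf("T8H,  shift 3:  0x%02x\n", immh_immb(T8H, 3));    // 0b0010011
  std::printf("T4S,  shift 5:  0x%02x\n", immh_immb(T4S, 5));    // 0b0100101
  std::printf("T2D,  shift 17: 0x%02x\n", immh_immb(T2D, 17));   // 0b1010001
  return 0;
}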

void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
starti;
/* The encodings for the immh:immb fields (bits 22:16) are
@ -2149,6 +2244,23 @@ public:
rf(Vn, 5), rf(Vd, 0);
}

void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs)
{
starti;
assert(T != T1D, "reserved encoding");
f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21);
f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), rf(Xs, 5), rf(Vd, 0);
}

void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0)
{
starti;
assert(T != T1D, "reserved encoding");
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21);
f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
}

// CRC32 instructions
#define INSN(NAME, sf, sz) \
void NAME(Register Rd, Register Rn, Register Rm) { \
@ -64,7 +64,7 @@ define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
define_pd_global(intx, PreInflateSpin, 10);

define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, false);
define_pd_global(bool, RewriteFrequentPairs, true);

define_pd_global(bool, UseMembar, true);
@ -2802,8 +2802,8 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
uzp2(v21, v20, v16, T2D);
eor(v20, T16B, v17, v21);

shl(v16, v28, T2D, 1);
shl(v17, v20, T2D, 1);
shl(v16, T2D, v28, 1);
shl(v17, T2D, v20, 1);

eor(v0, T16B, v0, v16);
eor(v1, T16B, v1, v17);
@ -37,6 +37,7 @@ class MacroAssembler: public Assembler {
friend class LIR_Assembler;

using Assembler::mov;
using Assembler::movi;

protected:

@ -464,6 +465,45 @@ public:

void movptr(Register r, uintptr_t imm64);

// Macro to mov replicated immediate to vector register.
// Where imm32 == hex abcdefgh, Vd will get the following values
// for different arrangements in T
// T8B: Vd = ghghghghghghghgh
// T16B: Vd = ghghghghghghghghghghghghghghghgh
// T4H: Vd = efghefghefghefgh
// T8H: Vd = efghefghefghefghefghefghefghefgh
// T2S: Vd = abcdefghabcdefgh
// T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
// T1D/T2D: invalid
void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
assert(T != T1D && T != T2D, "invalid arrangement");
u_int32_t nimm32 = ~imm32;
if (T == T8B || T == T16B) { imm32 &= 0xff; nimm32 &= 0xff; }
if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; }
u_int32_t x = imm32;
int movi_cnt = 0;
int movn_cnt = 0;
while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
x = nimm32;
while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
if (movn_cnt < movi_cnt) imm32 = nimm32;
unsigned lsl = 0;
while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
if (movn_cnt < movi_cnt)
mvni(Vd, T, imm32 & 0xff, lsl);
else
movi(Vd, T, imm32 & 0xff, lsl);
imm32 >>= 8; lsl += 8;
while (imm32) {
while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
if (movn_cnt < movi_cnt)
bici(Vd, T, imm32 & 0xff, lsl);
else
orri(Vd, T, imm32 & 0xff, lsl);
lsl += 8; imm32 >>= 8;
}
}
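A minimal host-side sketch (plain C++ rather than the assembler DSL, illustrative only) of the byte-counting decision the mov() macro above makes: it compares how many non-zero bytes imm32 and ~imm32 contain and emits the cheaper sequence, movi plus orri for the direct form or mvni plus bici for the inverted form.

#include <cstdint>
#include <cstdio>

// Each non-zero byte costs one extra orri/bici after the initial movi/mvni.
static int nonzero_bytes(uint32_t x) {
  int n = 0;
  while (x) { if (x & 0xff) n++; x >>= 8; }
  return n;
}

int main() {
  uint32_t imm32 = 0xffffff00;               // example constant
  int movi_cnt = nonzero_bytes(imm32);       // bytes set in imm32
  int movn_cnt = nonzero_bytes(~imm32);      // bytes set in ~imm32
  std::printf("imm32=0x%08x: movi path %d insn(s), mvni path %d insn(s) -> use %s\n",
              imm32, movi_cnt, movn_cnt,
              (movn_cnt < movi_cnt) ? "mvni/bici" : "movi/orri");
  return 0;
}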

// macro instructions for accessing and updating floating point
// status register
//
@ -186,7 +186,7 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
// it's optoregs.

number_of_registers = (2 * RegisterImpl::number_of_registers +
2 * FloatRegisterImpl::number_of_registers +
4 * FloatRegisterImpl::number_of_registers +
1) // flags
};
@ -513,23 +513,61 @@ void TemplateTable::nofast_iload() {
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
if (RewriteFrequentPairs && rc == may_rewrite) {
// TODO : check x86 code for what to do here
__ call_Unimplemented();
} else {
locals_index(r1);
__ ldr(r0, iaddress(r1));
Label rewrite, done;
Register bc = r4;

// get next bytecode
__ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

// if _iload, wait to rewrite to iload2. We only want to rewrite the
// last two iloads in a pair. Comparing against fast_iload means that
// the next bytecode is neither an iload or a caload, and therefore
// an iload pair.
__ cmpw(r1, Bytecodes::_iload);
__ br(Assembler::EQ, done);

// if _fast_iload rewrite to _fast_iload2
__ cmpw(r1, Bytecodes::_fast_iload);
__ movw(bc, Bytecodes::_fast_iload2);
__ br(Assembler::EQ, rewrite);

// if _caload rewrite to _fast_icaload
__ cmpw(r1, Bytecodes::_caload);
__ movw(bc, Bytecodes::_fast_icaload);
__ br(Assembler::EQ, rewrite);

// else rewrite to _fast_iload
__ movw(bc, Bytecodes::_fast_iload);

// rewrite
// bc: new bytecode
__ bind(rewrite);
patch_bytecode(Bytecodes::_iload, bc, r1, false);
__ bind(done);

}

// do iload, get the local value into tos
locals_index(r1);
__ ldr(r0, iaddress(r1));

}

void TemplateTable::fast_iload2()
{
__ call_Unimplemented();
transition(vtos, itos);
locals_index(r1);
__ ldr(r0, iaddress(r1));
__ push(itos);
locals_index(r1, 3);
__ ldr(r0, iaddress(r1));
}

void TemplateTable::fast_iload()
{
__ call_Unimplemented();
transition(vtos, itos);
locals_index(r1);
__ ldr(r0, iaddress(r1));
}

void TemplateTable::lload()
@ -721,7 +759,18 @@ void TemplateTable::caload()
// iload followed by caload frequent pair
void TemplateTable::fast_icaload()
{
__ call_Unimplemented();
transition(vtos, itos);
// load index out of locals
locals_index(r2);
__ ldr(r1, iaddress(r2));

__ pop_ptr(r0);

// r0: array
// r1: index
index_check(r0, r1); // leaves index in r1, kills rscratch1
__ lea(r1, Address(r0, r1, Address::uxtw(1)));
__ load_unsigned_short(r0, Address(r1, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload()
@ -797,7 +846,47 @@ void TemplateTable::aload_0_internal(RewriteControl rc) {
// These bytecodes with a small amount of code are most profitable
// to rewrite
if (RewriteFrequentPairs && rc == may_rewrite) {
__ call_Unimplemented();
Label rewrite, done;
const Register bc = r4;

// get next bytecode
__ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

// do actual aload_0
aload(0);

// if _getfield then wait with rewrite
__ cmpw(r1, Bytecodes::Bytecodes::_getfield);
__ br(Assembler::EQ, done);

// if _igetfield then reqrite to _fast_iaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ cmpw(r1, Bytecodes::_fast_igetfield);
__ movw(bc, Bytecodes::_fast_iaccess_0);
__ br(Assembler::EQ, rewrite);

// if _agetfield then reqrite to _fast_aaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ cmpw(r1, Bytecodes::_fast_agetfield);
__ movw(bc, Bytecodes::_fast_aaccess_0);
__ br(Assembler::EQ, rewrite);

// if _fgetfield then reqrite to _fast_faccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ cmpw(r1, Bytecodes::_fast_fgetfield);
__ movw(bc, Bytecodes::_fast_faccess_0);
__ br(Assembler::EQ, rewrite);

// else rewrite to _fast_aload0
assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ movw(bc, Bytecodes::Bytecodes::_fast_aload_0);

// rewrite
// bc: new bytecode
__ bind(rewrite);
patch_bytecode(Bytecodes::_aload_0, bc, r1, false);

__ bind(done);
} else {
aload(0);
}
@ -3768,7 +3768,7 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}

#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */
#define EXIT_TIMEOUT 300000 /* 5 minutes */

static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
@ -1469,7 +1469,9 @@ void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr p
} else {
guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
"Assumption");
flag_type = T_BYTE;
// Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
// need to use unsigned instructions to use the large offset to load the satb_mark_queue.
flag_type = T_BOOLEAN;
}
LIR_Opr thrd = getThreadPointer();
LIR_Address* mark_active_flag_addr =
@ -26,6 +26,7 @@
#define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
@ -641,6 +641,7 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
class FreeListSpace_DCTOC : public Filtering_DCTOC {
CompactibleFreeListSpace* _cfls;
CMSCollector* _collector;
bool _parallel;
protected:
// Override.
#define walk_mem_region_with_cl_DECL(ClosureType) \
@ -661,9 +662,10 @@ public:
CMSCollector* collector,
ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) :
HeapWord* boundary,
bool parallel) :
Filtering_DCTOC(sp, cl, precision, boundary),
_cfls(sp), _collector(collector) {}
_cfls(sp), _collector(collector), _parallel(parallel) {}
};

// We de-virtualize the block-related calls below, since we know that our
@ -674,10 +676,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0; \
if (is_par) { \
assert(GenCollectedHeap::heap()->n_par_threads() == \
GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
if (_parallel) { \
walk_mem_region_with_cl_par(mr, bottom, top, cl); \
} else { \
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
@ -747,8 +746,9 @@ FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) {
return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
HeapWord* boundary,
bool parallel) {
return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
}

@ -1897,11 +1897,9 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_sz < SmallForDictionary) {
bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
// The freeList lock is held, but multiple GC task threads might be executing in parallel.
bool is_par = Thread::current()->is_GC_task_thread();
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
assert(!is_par ||
(GenCollectedHeap::heap()->n_par_threads() ==
GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@ -1972,8 +1970,6 @@ void CompactibleFreeListSpace::save_marks() {

bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
assert(_promoInfo.tracking(), "No preceding save_marks?");
assert(GenCollectedHeap::heap()->n_par_threads() == 0,
"Shouldn't be called if using parallel gc.");
return _promoInfo.noPromotions();
}

@ -1981,8 +1977,6 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
\
void CompactibleFreeListSpace:: \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
assert(GenCollectedHeap::heap()->n_par_threads() == 0, \
"Shouldn't be called (yet) during parallel part of gc."); \
_promoInfo.promoted_oops_iterate##nv_suffix(blk); \
/* \
* This also restores any displaced headers and removes the elements from \
@ -438,7 +438,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary);
HeapWord* boundary,
bool parallel);

void blk_iterate(BlkClosure* cl);
void blk_iterate_careful(BlkClosureCareful* cl);
@ -2428,14 +2428,18 @@ void CMSCollector::verify_after_remark_work_1() {
MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
NULL); // SSS: Provide correct closure
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
NULL);
}

// Now mark from the roots
MarkFromRootsClosure markFromRootsClosure(this, _span,
@ -2496,14 +2500,18 @@ void CMSCollector::verify_after_remark_work_2() {

gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.

gch->gen_process_roots(_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
true, // younger gens are roots
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
NULL,
&cld_closure);
}

// Now mark from the roots
MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
@ -2913,10 +2921,11 @@ class CMSParMarkTask : public AbstractGangTask {

// Parallel initial mark task
class CMSParInitialMarkTask: public CMSParMarkTask {
StrongRootsScope* _strong_roots_scope;
public:
CMSParInitialMarkTask(CMSCollector* collector, uint n_workers) :
CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
collector, n_workers) {}
CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
_strong_roots_scope(strong_roots_scope) {}
void work(uint worker_id);
};

@ -3004,24 +3013,26 @@ void CMSCollector::checkpointRootsInitialWork() {
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
uint n_workers = workers->active_workers();
CMSParInitialMarkTask tsk(this, n_workers);
gch->set_par_threads(n_workers);

StrongRootsScope srs(n_workers);

CMSParInitialMarkTask tsk(this, &srs, n_workers);
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
if (n_workers > 1) {
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
StrongRootsScope srs;
tsk.work(0);
}
gch->set_par_threads(0);
} else {
// The serial version.
CLDToOopClosure cld_closure(&notOlder, true);
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
gch->gen_process_roots(_cmsGen->level(),

StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
_cmsGen->level(),
true, // younger gens are roots
true, // activate StrongRootsScope
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&notOlder,
@ -4452,9 +4463,9 @@ void CMSParInitialMarkTask::work(uint worker_id) {

CLDToOopClosure cld_closure(&par_mri_cl, true);

gch->gen_process_roots(_collector->_cmsGen->level(),
gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mri_cl,
@ -4478,6 +4489,7 @@ class CMSParRemarkTask: public CMSParMarkTask {
// The per-thread work queues, available here for stealing.
OopTaskQueueSet* _task_queues;
ParallelTaskTerminator _term;
StrongRootsScope* _strong_roots_scope;

public:
// A value of 0 passed to n_workers will cause the number of
@ -4485,12 +4497,14 @@ class CMSParRemarkTask: public CMSParMarkTask {
CMSParRemarkTask(CMSCollector* collector,
CompactibleFreeListSpace* cms_space,
uint n_workers, FlexibleWorkGang* workers,
OopTaskQueueSet* task_queues):
OopTaskQueueSet* task_queues,
StrongRootsScope* strong_roots_scope):
CMSParMarkTask("Rescan roots and grey objects in parallel",
collector, n_workers),
_cms_space(cms_space),
_task_queues(task_queues),
_term(n_workers, task_queues) { }
_term(n_workers, task_queues),
_strong_roots_scope(strong_roots_scope) { }

OopTaskQueueSet* task_queues() { return _task_queues; }

@ -4588,9 +4602,9 @@ void CMSParRemarkTask::work(uint worker_id) {
// ---------- remaining roots --------------
_timer.reset();
_timer.start();
gch->gen_process_roots(_collector->_cmsGen->level(),
gch->gen_process_roots(_strong_roots_scope,
_collector->_cmsGen->level(),
false, // yg was scanned above
false, // this is parallel code
GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
_collector->should_unload_classes(),
&par_mrias_cl,
@ -5058,22 +5072,15 @@ void CMSCollector::do_remark_parallel() {
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
// Choose to use the number of GC workers most recently set
// into "active_workers". If active_workers is not set, set it
// to ParallelGCThreads.
// into "active_workers".
uint n_workers = workers->active_workers();
if (n_workers == 0) {
assert(n_workers > 0, "Should have been set during scavenge");
n_workers = ParallelGCThreads;
workers->set_active_workers(n_workers);
}

CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();

CMSParRemarkTask tsk(this,
cms_space,
n_workers, workers, task_queues());
StrongRootsScope srs(n_workers);

CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);

// Set up for parallel process_roots work.
gch->set_par_threads(n_workers);
// We won't be iterating over the cards in the card table updating
// the younger_gen cards, so we shouldn't call the following else
// the verification code as well as subsequent younger_refs_iterate
@ -5105,15 +5112,12 @@ void CMSCollector::do_remark_parallel() {
// necessarily be so, since it's possible that we are doing
// ST marking.
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
StrongRootsScope srs;
tsk.work(0);
}

gch->set_par_threads(0); // 0 ==> non-parallel.
// restore, single-threaded for now, any preserved marks
// as a result of work_q overflow
restore_preserved_marks_if_any();
@ -5177,11 +5181,11 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();

gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
StrongRootsScope srs;
StrongRootsScope srs(1);

gch->gen_process_roots(_cmsGen->level(),
gch->gen_process_roots(&srs,
_cmsGen->level(),
true, // younger gens as roots
false, // use the local StrongRootsScope
GenCollectedHeap::ScanningOption(roots_scanning_options()),
should_unload_classes(),
&mrias_cl,
@ -5254,18 +5258,14 @@ public:
CMSBitMap* mark_bit_map,
AbstractWorkGang* workers,
OopTaskQueueSet* task_queues):
// XXX Should superclass AGTWOQ also know about AWG since it knows
// about the task_queues used by the AWG? Then it could initialize
// the terminator() object. See 6984287. The set_for_termination()
// below is a temporary band-aid for the regression in 6984287.
AbstractGangTaskWOopQueues("Process referents by policy in parallel",
task_queues),
task_queues,
workers->active_workers()),
_task(task),
_collector(collector), _span(span), _mark_bit_map(mark_bit_map)
{
assert(_collector->_span.equals(_span) && !_span.is_empty(),
"Inconsistency in _span");
set_for_termination(workers->active_workers());
}

OopTaskQueueSet* task_queues() { return queues(); }
@ -39,16 +39,11 @@
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
int n_threads) {
assert(n_threads > 0, "Error: expected n_threads > 0");
assert((n_threads == 1 && ParallelGCThreads == 0) ||
n_threads <= (int)ParallelGCThreads,
"# worker threads != # requested!");
assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
assert(UseDynamicNumberOfGCThreads ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
n_threads == (int)ParallelGCThreads,
"# worker threads != # requested!");
uint n_threads) {
assert(n_threads > 0, "expected n_threads > 0");
assert(n_threads <= ParallelGCThreads,
err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));

// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
@ -66,7 +61,8 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio

uint stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, cl, ct,
process_stride(sp, mr, stride, n_strides,
cl, ct,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
@ -132,9 +128,13 @@ process_stride(Space* sp,
assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

// This function is used by the parallel card table iteration.
const bool parallel = true;

DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
cl->gen_boundary());
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
cl->gen_boundary(),
parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

// Process the chunk.
@ -567,23 +567,15 @@ void ParEvacuateFollowersClosure::do_void() {
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen,
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope) :
AbstractGangTask("ParNewGeneration collection"),
_gen(gen), _old_gen(old_gen),
_young_old_boundary(young_old_boundary),
_state_set(state_set)
_state_set(state_set),
_strong_roots_scope(strong_roots_scope)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(uint active_workers) {
_state_set->reset(active_workers, _gen->promotion_failed());
// Should the heap be passed in? There's only 1 for now so
// grab it instead.
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource
@ -603,10 +595,10 @@ void ParNewGenTask::work(uint worker_id) {
false);

par_scan_state.start_strong_roots();
gch->gen_process_roots(_gen->level(),
gch->gen_process_roots(_strong_roots_scope,
_gen->level(),
true, // Process younger gens, if any,
// as strong roots.
false, // no scope; this is parallel code
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&par_scan_state.to_space_root_closure(),
@ -759,9 +751,6 @@ public:

private:
virtual void work(uint worker_id);
virtual void set_for_termination(uint active_workers) {
_state_set.terminator()->reset_for_reuse(active_workers);
}
private:
ParNewGeneration& _gen;
ProcessTask& _task;
@ -838,7 +827,6 @@ void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
_state_set.flush();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks();
}

@ -939,33 +927,35 @@ void ParNewGeneration::collect(bool full,
to()->clear(SpaceDecorator::Mangle);

gch->save_marks();
assert(workers != NULL, "Need parallel worker threads.");
uint n_workers = active_workers;

// Set the correct parallelism (number of queues) in the reference processor
ref_processor()->set_active_mt_degree(n_workers);
ref_processor()->set_active_mt_degree(active_workers);

// Always set the terminator for the active number of workers
// because only those workers go through the termination protocol.
ParallelTaskTerminator _term(n_workers, task_queues());
ParScanThreadStateSet thread_state_set(workers->active_workers(),
ParallelTaskTerminator _term(active_workers, task_queues());
ParScanThreadStateSet thread_state_set(active_workers,
*to(), *this, *_old_gen, *task_queues(),
_overflow_stacks, desired_plab_sz(), _term);

ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
gch->set_par_threads(n_workers);
gch->rem_set()->prepare_for_younger_refs_iterate(true);
// It turns out that even when we're using 1 thread, doing the work in a
// separate thread causes wide variance in run times. We can't help this
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
StrongRootsScope srs;
workers->run_task(&tsk);
} else {
StrongRootsScope srs;
tsk.work(0);
thread_state_set.reset(active_workers, promotion_failed());

{
StrongRootsScope srs(active_workers);

ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
gch->rem_set()->prepare_for_younger_refs_iterate(true);
// It turns out that even when we're using 1 thread, doing the work in a
// separate thread causes wide variance in run times. We can't help this
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (active_workers > 1) {
workers->run_task(&tsk);
} else {
tsk.work(0);
}
}

thread_state_set.reset(0 /* Bad value in debug if not reset */,
promotion_failed());

@ -995,7 +985,6 @@ void ParNewGeneration::collect(bool full,
_gc_timer, _gc_tracer.gc_id());
} else {
thread_state_set.flush();
gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks();
stats = rp->process_discovered_references(&is_alive, &keep_alive,
&evacuate_followers, NULL,
@ -1033,7 +1022,7 @@ void ParNewGeneration::collect(bool full,
to()->set_concurrent_iteration_safe_limit(to()->top());

if (ResizePLAB) {
plab_stats()->adjust_desired_plab_sz(n_workers);
plab_stats()->adjust_desired_plab_sz(active_workers);
}

if (PrintGC && !PrintGCDetails) {
@ -1477,9 +1466,9 @@ void ParNewGeneration::ref_processor_init() {
_ref_processor =
new ReferenceProcessor(_reserved, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
(uint) ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
(int) ParallelGCThreads, // mt discovery degree
(uint) ParallelGCThreads, // mt discovery degree
refs_discovery_is_atomic(), // atomic_discovery
NULL); // is_alive_non_header
}
@ -39,6 +39,7 @@ class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;
class StrongRootsScope;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
@ -237,20 +238,18 @@ class ParNewGenTask: public AbstractGangTask {
Generation* _old_gen;
HeapWord* _young_old_boundary;
class ParScanThreadStateSet* _state_set;
StrongRootsScope* _strong_roots_scope;

public:
ParNewGenTask(ParNewGeneration* gen,
Generation* old_gen,
HeapWord* young_old_boundary,
ParScanThreadStateSet* state_set);
ParScanThreadStateSet* state_set,
StrongRootsScope* strong_roots_scope);

HeapWord* young_old_boundary() { return _young_old_boundary; }

void work(uint worker_id);

// Reset the terminator in ParScanThreadStateSet for
// "active_workers" threads.
virtual void set_for_termination(uint active_workers);
};

class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
@ -26,6 +26,7 @@
#define SHARE_VM_GC_CMS_PAROOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/padded.hpp"

// Closures for ParNewGeneration
@ -147,6 +147,13 @@ public:
bool completed() const { return _status == COMPLETED; }
bool aborted() const { return _status == ABORTED; }
bool active() const { return _status == ACTIVE; }

// This method configures the task for proper termination.
// Some tasks do not have any requirements on termination
// and may inherit this method that does nothing. Some
// tasks do some coordination on termination and override
// this method to implement that coordination.
virtual void set_for_termination(uint active_workers) {}
};
// Class YieldingWorkGang: A subclass of WorkGang.
// In particular, a YieldingWorkGang is made up of
@ -158,20 +158,10 @@ void CollectionSetChooser::add_region(HeapRegion* hr) {
hr->calc_gc_efficiency();
}

void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
void CollectionSetChooser::prepare_for_par_region_addition(uint n_threads,
uint n_regions,
uint chunk_size) {
_first_par_unreserved_idx = 0;
uint n_threads = (uint) ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) {
assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
"Should have been set earlier");
// This is defensive code. As the assertion above says, the number
// of active threads should be > 0, but in case there is some path
// or some improperly initialized variable with leads to no
// active threads, protect against that in a product build.
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1U);
}
uint max_waste = n_threads * chunk_size;
// it should be aligned with respect to chunk_size
uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
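The last context line of the hunk above rounds n_regions up to a multiple of chunk_size. A tiny standalone sketch of that round-up idiom, with illustrative values only:

#include <cstdio>

// Round n up to the next multiple of chunk (chunk > 0).
static unsigned round_up(unsigned n, unsigned chunk) {
  return (n + chunk - 1) / chunk * chunk;
}

int main() {
  // e.g. 10 regions with a chunk size of 4 reserve 12 slots (3 full chunks).
  std::printf("%u\n", round_up(10, 4));   // prints 12
  return 0;
}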
@ -121,7 +121,7 @@ public:

// Must be called before calls to claim_array_chunk().
// n_regions is the number of regions, chunk_size the chunk size.
void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
void prepare_for_par_region_addition(uint n_threads, uint n_regions, uint chunk_size);
// Returns the first index in a contiguous chunk of chunk_size indexes
// that the calling thread has reserved. These must be set by the
// calling thread using set_region() (to NULL if necessary).
@ -35,7 +35,7 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosu
{
// Ergonomically select initial concurrent refinement parameters
if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, (intx)ParallelGCThreads);
}
set_green_zone(G1ConcRefinementGreenZone);
@ -518,7 +518,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
|
||||
_markStack(this),
|
||||
// _finger set in set_non_marking_state
|
||||
|
||||
_max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
|
||||
_max_worker_id((uint)ParallelGCThreads),
|
||||
// _active_tasks set in set_non_marking_state
|
||||
// _tasks set inside the constructor
|
||||
_task_queues(new CMTaskQueueSet((int) _max_worker_id)),
|
||||
@ -1218,15 +1218,13 @@ void ConcurrentMark::markFromRoots() {
|
||||
"Maximum number of marking threads exceeded");
|
||||
|
||||
uint active_workers = MAX2(1U, parallel_marking_threads());
|
||||
assert(active_workers > 0, "Should have been set");
|
||||
|
||||
// Parallel task terminator is set in "set_concurrency_and_phase()"
|
||||
set_concurrency_and_phase(active_workers, true /* concurrent */);
|
||||
|
||||
CMConcurrentMarkingTask markingTask(this, cmThread());
|
||||
_parallel_workers->set_active_workers(active_workers);
|
||||
// Don't set _n_par_threads because it affects MT in process_roots()
|
||||
// and the decisions on that MT processing is made elsewhere.
|
||||
assert(_parallel_workers->active_workers() > 0, "Should have been set");
|
||||
_parallel_workers->run_task(&markingTask);
|
||||
print_stats();
|
||||
}
|
||||
@ -1761,28 +1759,20 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class G1ParNoteEndTask;
|
||||
|
||||
class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
size_t _max_live_bytes;
|
||||
uint _regions_claimed;
|
||||
size_t _freed_bytes;
|
||||
FreeRegionList* _local_cleanup_list;
|
||||
HeapRegionSetCount _old_regions_removed;
|
||||
HeapRegionSetCount _humongous_regions_removed;
|
||||
HRRSCleanupTask* _hrrs_cleanup_task;
|
||||
double _claimed_region_time;
|
||||
double _max_region_time;
|
||||
|
||||
public:
|
||||
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
|
||||
FreeRegionList* local_cleanup_list,
|
||||
HRRSCleanupTask* hrrs_cleanup_task) :
|
||||
_g1(g1),
|
||||
_max_live_bytes(0), _regions_claimed(0),
|
||||
_freed_bytes(0),
|
||||
_claimed_region_time(0.0), _max_region_time(0.0),
|
||||
_local_cleanup_list(local_cleanup_list),
|
||||
_old_regions_removed(),
|
||||
_humongous_regions_removed(),
|
||||
@ -1799,10 +1789,7 @@ public:
|
||||
// We use a claim value of zero here because all regions
|
||||
// were claimed with value 1 in the FinalCount task.
|
||||
_g1->reset_gc_time_stamps(hr);
|
||||
double start = os::elapsedTime();
|
||||
_regions_claimed++;
|
||||
hr->note_end_of_marking();
|
||||
_max_live_bytes += hr->max_live_bytes();
|
||||
|
||||
if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
|
||||
_freed_bytes += hr->used();
|
||||
@ -1819,18 +1806,8 @@ public:
|
||||
hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
|
||||
}
|
||||
|
||||
double region_time = (os::elapsedTime() - start);
|
||||
_claimed_region_time += region_time;
|
||||
if (region_time > _max_region_time) {
|
||||
_max_region_time = region_time;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
size_t max_live_bytes() { return _max_live_bytes; }
|
||||
uint regions_claimed() { return _regions_claimed; }
|
||||
double claimed_region_time_sec() { return _claimed_region_time; }
|
||||
double max_region_time_sec() { return _max_region_time; }
|
||||
};
|
||||
|
||||
class G1ParNoteEndTask: public AbstractGangTask {
|
||||
@ -1838,14 +1815,12 @@ class G1ParNoteEndTask: public AbstractGangTask {
|
||||
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
size_t _max_live_bytes;
|
||||
size_t _freed_bytes;
|
||||
FreeRegionList* _cleanup_list;
|
||||
HeapRegionClaimer _hrclaimer;
|
||||
|
||||
public:
|
||||
G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
|
||||
AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
|
||||
AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
|
||||
}
|
||||
|
||||
void work(uint worker_id) {
|
||||
@ -1861,8 +1836,6 @@ public:
|
||||
{
|
||||
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
|
||||
_g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
|
||||
_max_live_bytes += g1_note_end.max_live_bytes();
|
||||
_freed_bytes += g1_note_end.freed_bytes();
|
||||
|
||||
// If we iterate over the global cleanup list at the end of
|
||||
// cleanup to do this printing we will not guarantee to only
|
||||
@ -1887,8 +1860,6 @@ public:
|
||||
HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
|
||||
}
|
||||
}
|
||||
size_t max_live_bytes() { return _max_live_bytes; }
|
||||
size_t freed_bytes() { return _freed_bytes; }
|
||||
};
|
||||
|
||||
class G1ParScrubRemSetTask: public AbstractGangTask {
|
||||
@ -1938,18 +1909,10 @@ void ConcurrentMark::cleanup() {
|
||||
|
||||
HeapRegionRemSet::reset_for_cleanup_tasks();
|
||||
|
||||
uint n_workers;
|
||||
|
||||
// Do counting once more with the world stopped for good measure.
|
||||
G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

g1h->set_par_threads();
n_workers = g1h->n_par_threads();
assert(g1h->n_par_threads() == n_workers,
"Should not have been reset");
g1h->workers()->run_task(&g1_par_count_task);
// Done with the parallel phase so reset to 0.
g1h->set_par_threads(0);

if (VerifyDuringGC) {
// Verify that the counting data accumulated during marking matches
@ -1965,10 +1928,7 @@ void ConcurrentMark::cleanup() {
&expected_region_bm,
&expected_card_bm);

g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_verify_task);
// Done with the parallel phase so reset to 0.
g1h->set_par_threads(0);

guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
}
@ -1990,11 +1950,11 @@ void ConcurrentMark::cleanup() {

g1h->reset_gc_time_stamp();

uint n_workers = _g1h->workers()->active_workers();

// Note end of marking in all heap regions.
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_note_end_task);
g1h->set_par_threads(0);
g1h->check_gc_time_stamps();

if (!cleanup_list_is_empty()) {
@ -2009,9 +1969,7 @@ void ConcurrentMark::cleanup() {
if (G1ScrubRemSets) {
double rs_scrub_start = os::elapsedTime();
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_scrub_rs_task);
g1h->set_par_threads(0);

double rs_scrub_end = os::elapsedTime();
double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
@ -2020,7 +1978,7 @@ void ConcurrentMark::cleanup() {

// this will also free any regions totally full of garbage objects,
// and sort the regions.
g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
g1h->g1_policy()->record_concurrent_mark_cleanup_end();

// Statistics.
double end = os::elapsedTime();
@ -2312,9 +2270,7 @@ void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
// and overflow handling in CMTask::do_marking_step() knows
// how many workers to wait for.
_cm->set_concurrency(_active_workers);
_g1h->set_par_threads(_active_workers);
_workers->run_task(&proc_task_proxy);
_g1h->set_par_threads(0);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
@ -2344,9 +2300,7 @@ void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
// and overflow handling in CMTask::do_marking_step() knows
// how many workers to wait for.
_cm->set_concurrency(_active_workers);
_g1h->set_par_threads(_active_workers);
_workers->run_task(&enq_task_proxy);
_g1h->set_par_threads(0);
}

void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
@ -2608,27 +2562,23 @@ void ConcurrentMark::checkpointRootsFinalWork() {

g1h->ensure_parsability(false);

StrongRootsScope srs;
// this is remark, so we'll use up all active threads
uint active_workers = g1h->workers()->active_workers();
if (active_workers == 0) {
assert(active_workers > 0, "Should have been set earlier");
active_workers = (uint) ParallelGCThreads;
g1h->workers()->set_active_workers(active_workers);
}
set_concurrency_and_phase(active_workers, false /* concurrent */);
// Leave _parallel_marking_threads at it's
// value originally calculated in the ConcurrentMark
// constructor and pass values of the active workers
// through the gang in the task.

CMRemarkTask remarkTask(this, active_workers);
// We will start all available threads, even if we decide that the
// active_workers will be fewer. The extra ones will just bail out
// immediately.
g1h->set_par_threads(active_workers);
g1h->workers()->run_task(&remarkTask);
g1h->set_par_threads(0);
{
StrongRootsScope srs(active_workers);

CMRemarkTask remarkTask(this, active_workers);
// We will start all available threads, even if we decide that the
// active_workers will be fewer. The extra ones will just bail out
// immediately.
g1h->workers()->run_task(&remarkTask);
}

SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
guarantee(has_overflown() ||
@ -3001,9 +2951,7 @@ void ConcurrentMark::aggregate_count_data() {
G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
_max_worker_id, n_workers);

_g1h->set_par_threads(n_workers);
_g1h->workers()->run_task(&g1_par_agg_task);
_g1h->set_par_threads(0);
}

// Clear the per-worker arrays used to store the per-region counting data
|
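
The recurring shape of this change is visible above: the set_par_threads(n) / run_task(...) / set_par_threads(0) bracket around each gang task is removed, and the worker count is instead given to the task (or scope) at construction. A minimal standalone sketch of that pattern, using toy stand-ins rather than HotSpot's FlexibleWorkGang/AbstractGangTask:

#include <cstdio>
#include <thread>
#include <vector>

// Toy gang task: sized at construction, like G1ParTask after this change.
struct GangTask {
  unsigned _n_workers;
  explicit GangTask(unsigned n) : _n_workers(n) {}
  void work(unsigned worker_id) {
    std::printf("worker %u of %u\n", worker_id, _n_workers);
  }
};

// Toy work gang: runs the task on its active workers; no global
// "number of parallel threads" state is set or reset around the call.
struct WorkGang {
  unsigned _active_workers;
  explicit WorkGang(unsigned n) : _active_workers(n) {}
  unsigned active_workers() const { return _active_workers; }
  void run_task(GangTask* task) {
    std::vector<std::thread> threads;
    for (unsigned i = 0; i < _active_workers; i++) {
      threads.emplace_back([task, i] { task->work(i); });
    }
    for (std::thread& t : threads) t.join();
  }
};

int main() {
  WorkGang gang(4);
  GangTask task(gang.active_workers());  // worker count travels with the task
  gang.run_task(&task);
  return 0;
}
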
@ -1326,27 +1326,10 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->set_active_workers(n_workers);
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// no get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
set_par_threads(n_workers);

ParRebuildRSTask rebuild_rs_task(this);
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"Unless dynamic should use total workers");
// Use the most recent number of active workers
assert(workers()->active_workers() > 0,
"Active workers not properly set");
set_par_threads(workers()->active_workers());
workers()->run_task(&rebuild_rs_task);
set_par_threads(0);

// Rebuild the strong code root lists for each region
rebuild_strong_code_roots();
@ -1769,7 +1752,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_allocator = G1Allocator::create_allocator(this);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

int n_queues = MAX2((int)ParallelGCThreads, 1);
int n_queues = (int)ParallelGCThreads;
_task_queues = new RefToScanQueueSet(n_queues);

uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
@ -2081,11 +2064,11 @@ void G1CollectedHeap::ref_processing_init() {
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1),
// mt processing
(int) ParallelGCThreads,
(uint) ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1) || (ConcGCThreads > 1),
// mt discovery
(int) MAX2(ParallelGCThreads, ConcGCThreads),
(uint) MAX2(ParallelGCThreads, ConcGCThreads),
// degree of mt discovery
false,
// Reference discovery is not atomic
@ -2098,11 +2081,11 @@ void G1CollectedHeap::ref_processing_init() {
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1),
// mt processing
MAX2((int)ParallelGCThreads, 1),
(uint) ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1),
// mt discovery
MAX2((int)ParallelGCThreads, 1),
(uint) ParallelGCThreads,
// degree of mt discovery
true,
// Reference discovery is atomic
@ -2502,8 +2485,7 @@ void G1CollectedHeap::clear_cset_start_regions() {
assert(_worker_cset_start_region != NULL, "sanity");
assert(_worker_cset_start_region_time_stamp != NULL, "sanity");

int n_queues = MAX2((int)ParallelGCThreads, 1);
for (int i = 0; i < n_queues; i++) {
for (uint i = 0; i < ParallelGCThreads; i++) {
_worker_cset_start_region[i] = NULL;
_worker_cset_start_region_time_stamp[i] = 0;
}
@ -2541,9 +2523,6 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
result = g1_policy()->collection_set();
uint cs_size = g1_policy()->cset_region_length();
uint active_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads ||
active_workers == workers()->total_workers(),
"Unless dynamic should use total workers");

uint end_ind = (cs_size * worker_i) / active_workers;
uint start_ind = 0;
@ -3021,7 +3000,7 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);

{
G1RootProcessor root_processor(this);
G1RootProcessor root_processor(this, 1);
root_processor.process_all_roots(&rootsCl,
&cldCl,
&blobsCl);
@ -3042,13 +3021,7 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {

G1ParVerifyTask task(this, vo);
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"If not dynamic should be using all the workers");
uint n_workers = workers()->active_workers();
set_par_threads(n_workers);
workers()->run_task(&task);
set_par_threads(0);
if (task.failures()) {
failures = true;
}
@ -3572,6 +3545,10 @@ public:
};
#endif // ASSERT

uint G1CollectedHeap::num_task_queues() const {
return _task_queues->size();
}

#if TASKQUEUE_STATS
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
st->print_raw_cr("GC Task Stats");
@ -3583,7 +3560,7 @@ void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
print_taskqueue_stats_hdr(st);

TaskQueueStats totals;
const uint n = workers()->total_workers();
const uint n = num_task_queues();
for (uint i = 0; i < n; ++i) {
st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
totals += task_queue(i)->stats;
@ -3594,7 +3571,7 @@ void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
}

void G1CollectedHeap::reset_taskqueue_stats() {
const uint n = workers()->total_workers();
const uint n = num_task_queues();
for (uint i = 0; i < n; ++i) {
task_queue(i)->stats.reset();
}
@ -3696,9 +3673,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
active_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->set_active_workers(active_workers);

double pause_start_sec = os::elapsedTime();
@ -3873,8 +3847,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

if (evacuation_failed()) {
_allocator->set_used(recalculate_used());
uint n_queues = MAX2((int)ParallelGCThreads, 1);
for (uint i = 0; i < n_queues; i++) {
for (uint i = 0; i < ParallelGCThreads; i++) {
if (_evacuation_failed_info_array[i].has_failed()) {
_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
}
@ -4041,10 +4014,8 @@ void G1CollectedHeap::finalize_for_evac_failure() {
void G1CollectedHeap::remove_self_forwarding_pointers() {
double remove_self_forwards_start = os::elapsedTime();

set_par_threads();
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
workers()->run_task(&rsfp_task);
set_par_threads(0);

// Now restore saved marks, if any.
assert(_objs_with_preserved_marks.size() ==
@ -4308,12 +4279,13 @@ protected:
Mutex* stats_lock() { return &_stats_lock; }

public:
G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
: AbstractGangTask("G1 collection"),
_g1h(g1h),
_queues(task_queues),
_root_processor(root_processor),
_terminator(0, _queues),
_terminator(n_workers, _queues),
_n_workers(n_workers),
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
{}

@ -4325,12 +4297,6 @@ public:

ParallelTaskTerminator* terminator() { return &_terminator; }

virtual void set_for_termination(uint active_workers) {
_root_processor->set_num_workers(active_workers);
terminator()->reset_for_reuse(active_workers);
_n_workers = active_workers;
}

// Helps out with CLD processing.
//
// During InitialMark we need to:
@ -4811,19 +4777,14 @@ void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,

G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
n_workers, class_unloading_occurred);
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
set_par_threads(0);
}

void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
uint n_workers = workers()->active_workers();
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
set_par_threads(n_workers);
workers()->run_task(&g1_unlink_task);
set_par_threads(0);
}

if (G1StringDedup::is_enabled()) {
@ -4851,13 +4812,9 @@ class G1RedirtyLoggedCardsTask : public AbstractGangTask {
void G1CollectedHeap::redirty_logged_cards() {
double redirty_logged_cards_start = os::elapsedTime();

uint n_workers = workers()->active_workers();

G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
dirty_card_queue_set().reset_for_par_iteration();
set_par_threads(n_workers);
workers()->run_task(&redirty_task);
set_par_threads(0);

DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
dcq.merge_bufferlists(&dirty_card_queue_set());
@ -5093,9 +5050,7 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
ParallelTaskTerminator terminator(_active_workers, _queues);
G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);

_g1h->set_par_threads(_active_workers);
_workers->run_task(&proc_task_proxy);
_g1h->set_par_threads(0);
}

// Gang task for parallel reference enqueueing.
@ -5124,9 +5079,7 @@ void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {

G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);

_g1h->set_par_threads(_active_workers);
_workers->run_task(&enq_task_proxy);
_g1h->set_par_threads(0);
}

// End of weak reference support closures
@ -5219,7 +5172,7 @@ public:
};

// Weak Reference processing during an evacuation pause (part 1).
void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
void G1CollectedHeap::process_discovered_references() {
double ref_proc_start = os::elapsedTime();

ReferenceProcessor* rp = _ref_processor_stw;
@ -5246,17 +5199,14 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.

assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
uint no_of_gc_workers = workers()->active_workers();

set_par_threads(no_of_gc_workers);
G1ParPreserveCMReferentsTask keep_cm_referents(this,
no_of_gc_workers,
_task_queues);

workers()->run_task(&keep_cm_referents);

set_par_threads(0);

// Closure to test whether a referent is alive.
G1STWIsAliveClosure is_alive(this);

@ -5330,7 +5280,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
}

// Weak Reference processing during an evacuation pause (part 2).
void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
void G1CollectedHeap::enqueue_discovered_references() {
double ref_enq_start = os::elapsedTime();

ReferenceProcessor* rp = _ref_processor_stw;
@ -5344,12 +5294,12 @@ void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
} else {
// Parallel reference enqueueing

assert(no_of_gc_workers == workers()->active_workers(),
"Need to reset active workers");
assert(rp->num_q() == no_of_gc_workers, "sanity");
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
uint n_workers = workers()->active_workers();

G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
assert(rp->num_q() == n_workers, "sanity");
assert(n_workers <= rp->max_num_q(), "sanity");

G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, n_workers);
rp->enqueue_discovered_references(&par_task_executor);
}

@ -5380,11 +5330,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
hot_card_cache->set_use_cache(false);

const uint n_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
set_par_threads(n_workers);


init_for_evac_failure(NULL);

@ -5393,19 +5338,16 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
double end_par_time_sec;

{
G1RootProcessor root_processor(this);
G1ParTask g1_par_task(this, _task_queues, &root_processor);
G1RootProcessor root_processor(this, n_workers);
G1ParTask g1_par_task(this, _task_queues, &root_processor, n_workers);
// InitialMark needs claim bits to keep track of the marked-through CLDs.
if (g1_policy()->during_initial_mark_pause()) {
ClassLoaderDataGraph::clear_claimed_marks();
}

// The individual threads will set their evac-failure closures.
if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
// These tasks use ShareHeap::_process_strong_tasks
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"If not dynamic should be using all the workers");
// The individual threads will set their evac-failure closures.
if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();

workers()->run_task(&g1_par_task);
end_par_time_sec = os::elapsedTime();

@ -5425,14 +5367,12 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
(os::elapsedTime() - end_par_time_sec) * 1000.0;
phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);

set_par_threads(0);

// Process any discovered reference objects - we have
// to do this _before_ we retire the GC alloc regions
// as we may have to copy some 'reachable' referent
// objects (and their reachable sub-graphs) that were
// not copied during the pause.
process_discovered_references(n_workers);
process_discovered_references();

if (G1StringDedup::is_enabled()) {
double fixup_start = os::elapsedTime();
@ -5474,7 +5414,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
// will log these updates (and dirty their associated
// cards). We need these updates logged to update any
// RSets.
enqueue_discovered_references(n_workers);
enqueue_discovered_references();

redirty_logged_cards();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
@ -5779,9 +5719,7 @@ void G1CollectedHeap::cleanUpCardTable() {
// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this);

set_par_threads();
workers()->run_task(&cleanup_task);
set_par_threads(0);
#ifndef PRODUCT
if (G1VerifyCTCleanup || VerifyAfterGC) {
G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
@ -6314,21 +6252,6 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
g1mm()->update_eden_size();
}

void G1CollectedHeap::set_par_threads() {
// Don't change the number of workers. Use the value previously set
// in the workgroup.
uint n_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"Otherwise should be using the total number of workers");
if (n_workers == 0) {
assert(false, "Should have been set in prior evacuation pause.");
n_workers = ParallelGCThreads;
workers()->set_active_workers(n_workers);
}
set_par_threads(n_workers);
}

// Methods for the GC alloc regions

HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
|
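
Several methods above, such as process_discovered_references() and enqueue_discovered_references(), drop their no_of_gc_workers parameter and instead read the active worker count from the work gang themselves. A hedged sketch of that refactoring with illustrative placeholder types (not HotSpot's classes):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for the work gang.
struct Workers {
  unsigned active_workers() const { return 4; }
};

struct Heap {
  Workers _workers;
  const Workers* workers() const { return &_workers; }

  // Old shape: the caller had to thread the count through and keep it in sync.
  void enqueue_refs_old(unsigned no_of_gc_workers) {
    assert(no_of_gc_workers == workers()->active_workers() && "Need to reset active workers");
    std::printf("enqueue with %u workers\n", no_of_gc_workers);
  }

  // New shape: the callee asks the gang directly, so the parameter and the
  // consistency assert disappear.
  void enqueue_refs_new() {
    unsigned n_workers = workers()->active_workers();
    std::printf("enqueue with %u workers\n", n_workers);
  }
};

int main() {
  Heap heap;
  heap.enqueue_refs_old(heap.workers()->active_workers());
  heap.enqueue_refs_new();
  return 0;
}
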
@ -606,11 +606,11 @@ protected:

// Process any reference objects discovered during
// an incremental evacuation pause.
void process_discovered_references(uint no_of_gc_workers);
void process_discovered_references();

// Enqueue any remaining discovered references
// after processing.
void enqueue_discovered_references(uint no_of_gc_workers);
void enqueue_discovered_references();

public:

FlexibleWorkGang* workers() const { return _workers; }
@ -981,6 +981,8 @@ public:

RefToScanQueue *task_queue(uint i) const;

uint num_task_queues() const;

// A set of cards where updates happened during the GC
DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

@ -1012,11 +1014,6 @@ public:
// Initialize weak reference processing.
void ref_processing_init();

// Explicitly import set_par_threads into this scope
using CollectedHeap::set_par_threads;
// Set _n_par_threads according to a policy TBD.
void set_par_threads();

virtual Name kind() const {
return CollectedHeap::G1CollectedHeap;
}
|
@ -1587,14 +1587,17 @@ uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint
}

void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
_collectionSetChooser->clear();

FlexibleWorkGang* workers = _g1->workers();
uint n_workers = workers->active_workers();

uint n_regions = _g1->num_regions();
uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
_collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
_collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
_g1->workers()->run_task(&par_known_garbage_task);
workers->run_task(&par_known_garbage_task);

_collectionSetChooser->sort_regions();

|
@ -692,7 +692,7 @@ public:

// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end(uint n_workers);
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_completed();

// Records the information about the heap size for reporting in
|
@ -127,7 +127,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,

MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
{
G1RootProcessor root_processor(g1h);
G1RootProcessor root_processor(g1h, 1);
root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
&GenMarkSweep::follow_cld_closure,
&follow_code_closure);
@ -237,7 +237,7 @@ void G1MarkSweep::mark_sweep_phase3() {

CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
{
G1RootProcessor root_processor(g1h);
G1RootProcessor root_processor(g1h, 1);
root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
&GenMarkSweep::adjust_cld_closure,
&adjust_code_closure);
|
@ -50,8 +50,8 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
_par_scan_state = par_scan_state;
_worker_id = par_scan_state->queue_num();

assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
assert(_worker_id < ParallelGCThreads,
err_msg("The given worker id %u must be less than the number of threads " UINTX_FORMAT, _worker_id, ParallelGCThreads));
}

// Generate G1 specialized oop_oop_iterate functions.
|
@ -90,11 +90,10 @@ public:


void G1RootProcessor::worker_has_discovered_all_strong_classes() {
uint n_workers = _g1h->n_par_threads();
assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
if (new_value == n_workers) {
if (new_value == n_workers()) {
// This thread is last. Notify the others.
MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
_lock.notify_all();
@ -102,21 +101,20 @@ void G1RootProcessor::worker_has_discovered_all_strong_classes() {
}

void G1RootProcessor::wait_until_all_strong_classes_discovered() {
uint n_workers = _g1h->n_par_threads();
assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

if ((uint)_n_workers_discovered_strong_classes != n_workers) {
if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
while ((uint)_n_workers_discovered_strong_classes != n_workers) {
while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
_lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
}
}
}

G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
_g1h(g1h),
_process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
_srs(),
_srs(n_workers),
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
_n_workers_discovered_strong_classes(0) {}

@ -206,7 +204,7 @@ void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
}
}

_process_strong_tasks->all_tasks_completed();
_process_strong_tasks->all_tasks_completed(n_workers());
}

void G1RootProcessor::process_strong_roots(OopClosure* oops,
@ -216,7 +214,7 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
process_vm_roots(oops, NULL, NULL, 0);

_process_strong_tasks->all_tasks_completed();
_process_strong_tasks->all_tasks_completed(n_workers());
}

void G1RootProcessor::process_all_roots(OopClosure* oops,
@ -230,7 +228,7 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
CodeCache::blobs_do(blobs);
}

_process_strong_tasks->all_tasks_completed();
_process_strong_tasks->all_tasks_completed(n_workers());
}

void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
@ -253,7 +251,7 @@ void G1RootProcessor::process_java_roots(OopClosure* strong_roots,

{
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
bool is_par = _g1h->n_par_threads() > 0;
bool is_par = n_workers() > 1;
Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
}
}
@ -329,6 +327,6 @@ void G1RootProcessor::scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
_g1h->g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
}

void G1RootProcessor::set_num_workers(uint active_workers) {
_process_strong_tasks->set_n_threads(active_workers);
uint G1RootProcessor::n_workers() const {
return _srs.n_threads();
}
|
@ -85,7 +85,7 @@ class G1RootProcessor : public StackObj {
uint worker_i);

public:
G1RootProcessor(G1CollectedHeap* g1h);
G1RootProcessor(G1CollectedHeap* g1h, uint n_workers);

// Apply closures to the strongly and weakly reachable roots in the system
// in a single pass.
@ -114,8 +114,8 @@ public:
OopClosure* scan_non_heap_weak_roots,
uint worker_i);

// Inform the root processor about the number of worker threads
void set_num_workers(uint active_workers);
// Number of worker threads used by the root processor.
uint n_workers() const;
};

#endif // SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP
|
@ -153,9 +153,7 @@ void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,

G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->set_par_threads();
g1h->workers()->run_task(&task);
g1h->set_par_threads(0);
}

void G1StringDedup::threads_do(ThreadClosure* tc) {
|
@ -42,7 +42,7 @@ G1StringDedupQueue::G1StringDedupQueue() :
_cancel(false),
_empty(true),
_dropped(0) {
_nqueues = MAX2(ParallelGCThreads, (size_t)1);
_nqueues = ParallelGCThreads;
_queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
for (size_t i = 0; i < _nqueues; i++) {
new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
|
@ -112,7 +112,7 @@ public:
};

G1StringDedupEntryCache::G1StringDedupEntryCache() {
_nlists = MAX2(ParallelGCThreads, (size_t)1);
_nlists = ParallelGCThreads;
_lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
}

|
@ -832,9 +832,9 @@ void PSParallelCompact::post_initialize() {
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
(uint) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
(uint) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
&_is_alive_closure); // non-header is alive closure
_counters = new CollectorCounters("PSParallelCompact", 1);
@ -2029,7 +2029,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// Set the number of GC threads to be used in this collection
gc_task_manager()->set_active_gang();
gc_task_manager()->task_idle_workers();
heap->set_par_threads(gc_task_manager()->active_workers());

TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
|
@ -382,7 +382,6 @@ bool PSScavenge::invoke_no_policy() {
// Get the active number of workers here and use that value
// throughout the methods.
uint active_workers = gc_task_manager()->active_workers();
heap->set_par_threads(active_workers);

PSPromotionManager::pre_scavenge();

@ -846,9 +845,9 @@ void PSScavenge::initialize() {
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
(uint) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
(uint) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
NULL); // header provides liveness info

|
@ -38,6 +38,7 @@
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "memory/iterator.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
@ -454,7 +455,7 @@ void DefNewGeneration::compute_new_size() {
}
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
assert(false, "NYI -- are you sure you want to call this?");
}

@ -625,15 +626,22 @@ void DefNewGeneration::collect(bool full,
assert(gch->no_allocs_since_save_marks(0),
"save marks have not been newly set.");

gch->gen_process_roots(_level,
true, // Process younger gens, if any,
// as strong roots.
true, // activate StrongRootsScope
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&cld_scan_closure);
{
// DefNew needs to run with n_threads == 0, to make sure the serial
// version of the card table scanning code is used.
// See: CardTableModRefBS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);

gch->gen_process_roots(&srs,
_level,
true, // Process younger gens, if any,
// as strong roots.
GenCollectedHeap::SO_ScavengeCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&fsc_with_no_gc_barrier,
&fsc_with_gc_barrier,
&cld_scan_closure);
}

// "evacuate followers".
evacuate_followers.do_void();
|
@ -255,7 +255,7 @@ protected:
// Iteration
void object_iterate(ObjectClosure* blk);

void younger_refs_iterate(OopsInGenClosure* cl);
void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads);

void space_iterate(SpaceClosure* blk, bool usedOnly = false);

|
@ -40,6 +40,7 @@
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
@ -200,14 +201,18 @@ void GenMarkSweep::mark_sweep_phase1(int level,
// Need new claim bits before marking starts.
ClassLoaderDataGraph::clear_claimed_marks();

gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
GenCollectedHeap::SO_None,
GenCollectedHeap::StrongRootsOnly,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
level,
false, // Younger gens are not roots.
GenCollectedHeap::SO_None,
GenCollectedHeap::StrongRootsOnly,
&follow_root_closure,
&follow_root_closure,
&follow_cld_closure);
}

// Process reference objects found during marking
{
@ -284,14 +289,18 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
assert(level == 1, "We don't use mark-sweep on young generations.");
adjust_pointer_closure.set_orig_generation(gch->old_gen());

gch->gen_process_roots(level,
false, // Younger gens are not roots.
true, // activate StrongRootsScope
GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&adjust_pointer_closure,
&adjust_pointer_closure,
&adjust_cld_closure);
{
StrongRootsScope srs(1);

gch->gen_process_roots(&srs,
level,
false, // Younger gens are not roots.
GenCollectedHeap::SO_AllCodeCache,
GenCollectedHeap::StrongAndWeakRoots,
&adjust_pointer_closure,
&adjust_pointer_closure,
&adjust_cld_closure);
}

gch->gen_process_weak_roots(&adjust_pointer_closure);

|
@ -161,7 +161,7 @@ uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
}
_debug_perturbation = !_debug_perturbation;
}
assert((new_active_workers <= (uintx) ParallelGCThreads) &&
assert((new_active_workers <= ParallelGCThreads) &&
(new_active_workers >= min_workers),
"Jiggled active workers too much");
}
|
@ -353,8 +353,8 @@ void CardGeneration::space_iterate(SpaceClosure* blk,
blk->do_space(space());
}

void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk, uint n_threads) {
blk->set_generation(this);
younger_refs_in_space_iterate(space(), blk);
younger_refs_in_space_iterate(space(), blk, n_threads);
blk->reset_generation();
}
|
@ -89,7 +89,7 @@ class CardGeneration: public Generation {

void space_iterate(SpaceClosure* blk, bool usedOnly = false);

void younger_refs_iterate(OopsInGenClosure* blk);
void younger_refs_iterate(OopsInGenClosure* blk, uint n_threads);

bool is_in(const void* p) const;

|
@ -440,31 +440,11 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool relea
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct) {
CardTableRS* ct,
uint n_threads) {
if (!mr.is_empty()) {
// Caller (process_roots()) claims that all GC threads
// execute this call. With UseDynamicNumberOfGCThreads now all
// active GC threads execute this call. The number of active GC
// threads needs to be passed to par_non_clean_card_iterate_work()
// to get proper partitioning and termination.
//
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
// part of verification during root processing then n_par_threads()
// may have been set to 0. active_workers is not overloaded with
// the meaning that it is a switch to disable parallelism and so keeps
// the meaning of the number of active gc workers. If parallelism has
// not been shut off by setting n_par_threads to 0, then n_par_threads
// should be equal to active_workers. When a different mechanism for
// shutting off parallelism is used, then active_workers can be used in
// place of n_par_threads.
int n_threads = GenCollectedHeap::heap()->n_par_threads();
bool is_par = n_threads > 0;
if (is_par) {
if (n_threads > 0) {
#if INCLUDE_ALL_GCS
assert(GenCollectedHeap::heap()->n_par_threads() ==
GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // INCLUDE_ALL_GCS
fatal("Parallel gc not supported here.");
@ -472,8 +452,11 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
} else {
// clear_cl finds contiguous dirty ranges of cards to process and clear.

DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
// This is the single-threaded version used by DefNew.
const bool parallel = false;

DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

clear_cl.do_MemRegion(mr);
}
|
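
The card-scanning change above replaces a lookup of global n_par_threads() state with an explicit n_threads argument: a positive count selects the parallel path, while 0 forces the serial path (which is what DefNew requests via StrongRootsScope srs(0)). A small standalone sketch of that dispatch, with placeholder scanning bodies rather than the real card-table code:

#include <cstdio>
#include <thread>
#include <vector>

// Placeholder work: pretend each "stripe" of the card range is scanned independently.
static void scan_stripe(unsigned stripe, unsigned n_stripes) {
  std::printf("scanning stripe %u/%u\n", stripe, n_stripes);
}

// n_threads == 0 selects the single-threaded version; anything else fans out.
void non_clean_card_iterate(unsigned n_threads) {
  if (n_threads > 0) {
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < n_threads; i++) {
      workers.emplace_back(scan_stripe, i, n_threads);
    }
    for (std::thread& t : workers) t.join();
  } else {
    // Serial path: no worker threads are started.
    scan_stripe(0, 1);
  }
}

int main() {
  non_clean_card_iterate(0);  // serial, as DefNew requests
  non_clean_card_iterate(4);  // parallel, as the parallel collectors request
  return 0;
}
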
@ -178,14 +178,15 @@ class CardTableModRefBS: public ModRefBarrierSet {
// region mr in the given space and apply cl to any dirty sub-regions
// of mr. Clears the dirty cards as they are processed.
void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct);
OopsInGenClosure* cl, CardTableRS* ct,
uint n_threads);

private:
// Work method used to implement non_clean_card_iterate_possibly_parallel()
// above in the parallel case.
void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl, CardTableRS* ct,
int n_threads);
uint n_threads);

protected:
// Dirty the bytes corresponding to "mr" (not all of which must be
|
@ -102,9 +102,10 @@ void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
}

void CardTableRS::younger_refs_iterate(Generation* g,
OopsInGenClosure* blk) {
OopsInGenClosure* blk,
uint n_threads) {
_last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
g->younger_refs_iterate(blk);
g->younger_refs_iterate(blk, n_threads);
}

inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
@ -164,15 +165,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
}

ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
_dirty_card_closure(dirty_card_closure), _ct(ct) {
// Cannot yet substitute active_workers for n_par_threads
// in the case where parallelism is being turned off by
// setting n_par_threads to 0.
_is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
assert(!_is_par ||
(GenCollectedHeap::heap()->n_par_threads() ==
GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
_dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
}

bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
@ -272,7 +266,8 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
}

void CardTableRS::younger_refs_in_space_iterate(Space* sp,
OopsInGenClosure* cl) {
OopsInGenClosure* cl,
uint n_threads) {
const MemRegion urasm = sp->used_region_at_save_marks();
#ifdef ASSERT
// Convert the assertion check to a warning if we are running
@ -301,7 +296,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
ShouldNotReachHere();
}
#endif
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
}

void CardTableRS::clear_into_younger(Generation* old_gen) {
|
@ -56,7 +56,7 @@ class CardTableRS: public GenRemSet {

CardTableModRefBSForCTRS* _ct_bs;

virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

void verify_space(Space* s, HeapWord* gen_start);

@ -116,7 +116,7 @@ public:
// Card table entries are cleared before application; "blk" is
// responsible for dirtying if the oop is still older-to-younger after
// closure application.
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);

void inline_write_ref_field_gc(void* field, oop new_val) {
jbyte* byte = _ct_bs->byte_for(field);
@ -183,7 +183,7 @@ private:
bool is_word_aligned(jbyte* entry);

public:
ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);
void do_MemRegion(MemRegion mr);
};

|
@ -160,8 +160,7 @@ void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
CollectedHeap::CollectedHeap() {
const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
const size_t elements_per_word = HeapWordSize / sizeof(jint);
_filler_array_max_size = align_object_size(filler_array_hdr_size() +
|
@ -101,7 +101,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
protected:
BarrierSet* _barrier_set;
bool _is_gc_active;
uint _n_par_threads;

unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
@ -291,12 +290,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
}
GCCause::Cause gc_cause() { return _gc_cause; }

// Number of threads currently working on GC tasks.
uint n_par_threads() { return _n_par_threads; }

// May be overridden to set additional parallelism.
virtual void set_par_threads(uint t) { _n_par_threads = t; };

// General obj/array allocation facilities.
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
|
@ -561,16 +561,6 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
CollectedHeap::set_par_threads(t);
set_n_termination(t);
}

void GenCollectedHeap::set_n_termination(uint t) {
_process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
@ -582,15 +572,13 @@ public:
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(bool activate_scope,
void GenCollectedHeap::process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
OopClosure* weak_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_roots) {
StrongRootsScope srs(activate_scope);

// General roots.
assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
assert(code_roots != NULL, "code root closure should always be set");
@ -609,7 +597,7 @@ void GenCollectedHeap::process_roots(bool activate_scope,
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

bool is_par = n_par_threads() > 0;
bool is_par = scope->n_threads() > 1;
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);

if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
@ -669,9 +657,9 @@ void GenCollectedHeap::process_roots(bool activate_scope,

}

void GenCollectedHeap::gen_process_roots(int level,
void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
int level,
bool younger_gens_as_roots,
bool activate_scope,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
@ -689,7 +677,7 @@ void GenCollectedHeap::gen_process_roots(int level,
OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

process_roots(activate_scope, so,
process_roots(scope, so,
not_older_gens, weak_roots,
cld_closure, weak_cld_closure,
&mark_code_closure);
@ -707,11 +695,11 @@ void GenCollectedHeap::gen_process_roots(int level,
// older-gen scanning.
if (level == 0) {
older_gens->set_generation(_old_gen);
rem_set()->younger_refs_iterate(_old_gen, older_gens);
rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
older_gens->reset_generation();
}

_process_strong_tasks->all_tasks_completed();
_process_strong_tasks->all_tasks_completed(scope->n_threads());
}


|
@ -30,8 +30,9 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/generation.hpp"

class SubTasksDone;
class FlexibleWorkGang;
class StrongRootsScope;
class SubTasksDone;

// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
@ -363,9 +364,6 @@ public:
// asserted to be this type.
static GenCollectedHeap* heap();

void set_par_threads(uint t);
void set_n_termination(uint t);

// Invoke the "do_oop" method of one of the closures "not_older_gens"
// or "older_gens" on root locations for the generation at
// "level". (The "older_gens" closure is used for scanning references
@ -385,7 +383,7 @@ public:
};

private:
void process_roots(bool activate_scope,
void process_roots(StrongRootsScope* scope,
ScanningOption so,
OopClosure* strong_roots,
OopClosure* weak_roots,
@ -393,24 +391,13 @@ public:
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_roots);

void gen_process_roots(int level,
bool younger_gens_as_roots,
bool activate_scope,
ScanningOption so,
OopsInGenClosure* not_older_gens,
OopsInGenClosure* weak_roots,
OopsInGenClosure* older_gens,
CLDClosure* cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobClosure* code_closure);

public:
static const bool StrongAndWeakRoots = false;
static const bool StrongRootsOnly = true;

void gen_process_roots(int level,
void gen_process_roots(StrongRootsScope* scope,
int level,
bool younger_gens_as_roots,
bool activate_scope,
ScanningOption so,
bool only_strong_roots,
OopsInGenClosure* not_older_gens,
|
@ -35,11 +35,6 @@ class CardTableModRefBS;
class DefNewGeneration;
class KlassRemSet;

template<class E, MEMFLAGS F, unsigned int N> class GenericTaskQueue;
typedef GenericTaskQueue<oop, mtGC, TASKQUEUE_SIZE> OopTaskQueue;
template<class T, MEMFLAGS F> class GenericTaskQueueSet;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

// Closure for iterating roots from a particular generation
// Note: all classes deriving from this MUST call this do_barrier
// method at the end of their own do_oop method!
|
@ -77,10 +77,11 @@ public:
// 1) that are in objects allocated in "g" at the time of the last call
// to "save_Marks", and
// 2) that point to objects in younger generations.
virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0;
virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0;

virtual void younger_refs_in_space_iterate(Space* sp,
OopsInGenClosure* cl) = 0;
OopsInGenClosure* cl,
uint n_threads) = 0;

// This method is used to notify the remembered set that "new_val" has
// been written into "field" by the garbage collector.
|
@ -293,9 +293,10 @@ void Generation::oop_iterate(ExtendedOopClosure* cl) {
}

void Generation::younger_refs_in_space_iterate(Space* sp,
OopsInGenClosure* cl) {
OopsInGenClosure* cl,
uint n_threads) {
GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
rs->younger_refs_in_space_iterate(sp, cl);
rs->younger_refs_in_space_iterate(sp, cl, n_threads);
}

class GenerationObjIterateClosure : public SpaceClosure {
|
@ -122,7 +122,7 @@ class Generation: public CHeapObj<mtGC> {
// The iteration is only over objects allocated at the start of the
// iterations; objects allocated as a result of applying the closure are
// not included.
void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

public:
// The set of possible generation kinds.
@ -526,7 +526,7 @@ class Generation: public CHeapObj<mtGC> {
// in the current generation that contain pointers to objects in younger
// generations. Objects allocated since the last "save_marks" call are
// excluded.
virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

// Inform a generation that it longer contains references to objects
// in any younger generation. [e.g. Because younger gens are empty,
|
@ -181,7 +181,8 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) {
HeapWord* boundary,
bool parallel) {
return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

@ -260,7 +261,8 @@ ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) {
HeapWord* boundary,
bool parallel) {
return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

|
@ -183,7 +183,8 @@ class Space: public CHeapObj<mtGC> {
// operate. ResourceArea allocated.
virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary = NULL);
HeapWord* boundary,
bool parallel);

// If "p" is in the space, returns the address of the start of the
// "block" that contains "p". We say "block" instead of "object" since
@ -629,7 +630,8 @@ class ContiguousSpace: public CompactibleSpace {
// Override.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary = NULL);
HeapWord* boundary,
bool parallel);

// Apply "blk->do_oop" to the addresses of all reference fields in objects
// starting with the _saved_mark_word, which was noted during a generation's
|
@ -28,24 +28,18 @@
#include "gc/shared/strongRootsScope.hpp"
#include "runtime/thread.hpp"

MarkScope::MarkScope(bool activate) : _active(activate) {
if (_active) {
nmethod::oops_do_marking_prologue();
}
MarkScope::MarkScope() {
nmethod::oops_do_marking_prologue();
}

MarkScope::~MarkScope() {
if (_active) {
nmethod::oops_do_marking_epilogue();
}
nmethod::oops_do_marking_epilogue();
}

StrongRootsScope::StrongRootsScope(bool activate) : MarkScope(activate) {
if (_active) {
Threads::change_thread_claim_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) {
Threads::change_thread_claim_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}

StrongRootsScope::~StrongRootsScope() {
|
@ -29,18 +29,21 @@

class MarkScope : public StackObj {
protected:
bool _active;
public:
MarkScope(bool activate = true);
MarkScope();
~MarkScope();
};

// Sets up and tears down the required state for parallel root processing.

class StrongRootsScope : public MarkScope {
// Number of threads participating in the roots processing.
const uint _n_threads;

public:
StrongRootsScope(bool activate = true);
StrongRootsScope(uint n_threads);
~StrongRootsScope();

uint n_threads() const { return _n_threads; }
};

#endif // SHARE_VM_GC_SHARED_STRONGROOTSSCOPE_HPP
|
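
The strongRootsScope change above turns the scope from an on/off activate flag into an RAII object that carries the number of participating threads, which callers then read back through n_threads(). A standalone sketch of that shape, with the setup/teardown bodies reduced to stand-ins for the claim-parity and StringTable bookkeeping:

#include <cstdio>

// Stand-ins for the per-scope bookkeeping done in the real constructor/destructor.
static void begin_root_processing() { std::printf("prologue\n"); }
static void end_root_processing()   { std::printf("epilogue\n"); }

class StrongRootsScopeSketch {
  const unsigned _n_threads;  // number of threads participating in root processing
 public:
  explicit StrongRootsScopeSketch(unsigned n_threads) : _n_threads(n_threads) {
    begin_root_processing();
  }
  ~StrongRootsScopeSketch() {
    end_root_processing();
  }
  unsigned n_threads() const { return _n_threads; }
};

static void process_roots(const StrongRootsScopeSketch& scope) {
  // Callers no longer consult global parallel-thread state; the scope says
  // how many workers participate (0 or 1 meaning effectively serial).
  bool is_par = scope.n_threads() > 1;
  std::printf("processing roots, parallel=%d, n=%u\n", is_par, scope.n_threads());
}

int main() {
  {
    StrongRootsScopeSketch srs(4);   // remark-style: all active workers
    process_roots(srs);
  }
  {
    StrongRootsScopeSketch srs(0);   // DefNew-style: force the serial paths
    process_roots(srs);
  }
  return 0;
}
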
@ -382,6 +382,8 @@ public:
bool steal(uint queue_num, int* seed, E& t);

bool peek();

uint size() const { return _n; }
};

template<class T, MEMFLAGS F> void
|
@ -133,8 +133,6 @@ void WorkGang::run_task(AbstractGangTask* task) {
|
||||
}
|
||||
|
||||
void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
|
||||
task->set_for_termination(no_of_parallel_workers);
|
||||
|
||||
// This thread is executed by the VM thread which does not block
|
||||
// on ordinary MutexLocker's.
|
||||
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
|
||||
@ -434,7 +432,7 @@ void WorkGangBarrierSync::abort() {
|
||||
// SubTasksDone functions.
|
||||
|
||||
SubTasksDone::SubTasksDone(uint n) :
_n_tasks(n), _n_threads(1), _tasks(NULL) {
_n_tasks(n), _tasks(NULL) {
_tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
guarantee(_tasks != NULL, "alloc failure");
clear();
@ -444,12 +442,6 @@ bool SubTasksDone::valid() {
return _tasks != NULL;
}

void SubTasksDone::set_n_threads(uint t) {
assert(_claimed == 0 || _threads_completed == _n_threads,
"should not be called while tasks are being processed!");
_n_threads = (t == 0 ? 1 : t);
}

void SubTasksDone::clear() {
for (uint i = 0; i < _n_tasks; i++) {
_tasks[i] = 0;
@ -477,7 +469,7 @@ bool SubTasksDone::is_task_claimed(uint t) {
return res;
}

void SubTasksDone::all_tasks_completed() {
void SubTasksDone::all_tasks_completed(uint n_threads) {
jint observed = _threads_completed;
jint old;
do {
@ -485,7 +477,10 @@ void SubTasksDone::all_tasks_completed() {
observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
} while (observed != old);
// If this was the last thread checking in, clear the tasks.
if (observed+1 == (jint)_n_threads) clear();
uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
if (observed + 1 == (jint)adjusted_thread_count) {
clear();
}
}
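Taken together with is_task_claimed(), the check-in above gives every worker the same simple protocol: claim sub-tasks, do the ones you win, then check in with the thread count; the last thread to check in clears the task array. A minimal worker-body sketch, assuming a SubTasksDone shared by n_workers threads (names are illustrative):

void example_worker(SubTasksDone* subtasks, uint n_workers) {
  if (!subtasks->is_task_claimed(0)) {
    // ... this thread won sub-task 0, execute it ...
  }
  if (!subtasks->is_task_claimed(1)) {
    // ... sub-task 1 ...
  }
  // Every participating thread must check in, passing the worker count.
  subtasks->all_tasks_completed(n_workers);
}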
|
||||
|
||||
|
||||
|
@ -59,13 +59,6 @@ public:
|
||||
// The argument tells you which member of the gang you are.
|
||||
virtual void work(uint worker_id) = 0;
|
||||
|
||||
// This method configures the task for proper termination.
|
||||
// Some tasks do not have any requirements on termination
|
||||
// and may inherit this method that does nothing. Some
|
||||
// tasks do some coordination on termination and override
|
||||
// this method to implement that coordination.
|
||||
virtual void set_for_termination(uint active_workers) {};
|
||||
|
||||
// Debugging accessor for the name.
|
||||
const char* name() const PRODUCT_RETURN_(return NULL;);
|
||||
int counter() { return _counter; }
|
||||
@ -99,12 +92,9 @@ class AbstractGangTaskWOopQueues : public AbstractGangTask {
|
||||
OopTaskQueueSet* _queues;
|
||||
ParallelTaskTerminator _terminator;
|
||||
public:
|
||||
AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues) :
|
||||
AbstractGangTask(name), _queues(queues), _terminator(0, _queues) {}
|
||||
AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
|
||||
AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
|
||||
ParallelTaskTerminator* terminator() { return &_terminator; }
|
||||
virtual void set_for_termination(uint active_workers) {
|
||||
terminator()->reset_for_reuse(active_workers);
|
||||
}
|
||||
OopTaskQueueSet* queues() { return _queues; }
|
||||
};
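With the extra n_threads constructor argument the ParallelTaskTerminator is sized when the task is built, instead of later through set_for_termination(), which this change removes from AbstractGangTask. A hypothetical subclass sketch (not from this patch) showing the intended shape:

class ExampleGangTask : public AbstractGangTaskWOopQueues {
public:
  ExampleGangTask(OopTaskQueueSet* queues, uint n_workers) :
    AbstractGangTaskWOopQueues("example task", queues, n_workers) {}
  virtual void work(uint worker_id) {
    // ... drain queues()->queue(worker_id), then cooperate on termination ...
    terminator()->offer_termination();
  }
};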
|
||||
|
||||
@ -315,16 +305,20 @@ class FlexibleWorkGang: public WorkGang {
|
||||
uint _active_workers;
|
||||
public:
|
||||
// Constructor and destructor.
|
||||
// Initialize active_workers to a minimum value. Setting it to
|
||||
// the parameter "workers" will initialize it to a maximum
|
||||
// value which is not desirable.
|
||||
FlexibleWorkGang(const char* name, uint workers,
|
||||
bool are_GC_task_threads,
|
||||
bool are_ConcurrentGC_threads) :
|
||||
WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
|
||||
_active_workers(UseDynamicNumberOfGCThreads ? 1U : ParallelGCThreads) {}
|
||||
// Accessors for fields
|
||||
virtual uint active_workers() const { return _active_workers; }
|
||||
_active_workers(UseDynamicNumberOfGCThreads ? 1U : workers) {}
|
||||
|
||||
// Accessors for fields.
|
||||
virtual uint active_workers() const {
|
||||
assert(_active_workers <= _total_workers,
|
||||
err_msg("_active_workers: %u > _total_workers: %u", _active_workers, _total_workers));
|
||||
assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers,
|
||||
"Unless dynamic should use total workers");
|
||||
return _active_workers;
|
||||
}
|
||||
void set_active_workers(uint v) {
|
||||
assert(v <= _total_workers,
|
||||
"Trying to set more workers active than there are");
|
||||
@ -390,12 +384,6 @@ public:
|
||||
class SubTasksDone: public CHeapObj<mtInternal> {
uint* _tasks;
uint _n_tasks;
// _n_threads is used to determine when a sub task is done.
// It does not control how many threads will execute the subtask
// but must be initialized to the number that do execute the task
// in order to correctly decide when the subtask is done (all the
// threads working on the task have finished).
uint _n_threads;
uint _threads_completed;
#ifdef ASSERT
volatile uint _claimed;
@ -413,11 +401,6 @@ public:
// True iff the object is in a valid state.
bool valid();

// Get/set the number of parallel threads doing the tasks to "t". Can only
// be called before tasks start or after they are complete.
uint n_threads() { return _n_threads; }
void set_n_threads(uint t);

// Returns "false" if the task "t" is unclaimed, and ensures that task is
// claimed. The task "t" is required to be within the range of "this".
bool is_task_claimed(uint t);
@ -426,7 +409,9 @@ public:
// tasks that it will try to claim. Every thread in the parallel task
// must execute this. (When the last thread does so, the task array is
// cleared.)
void all_tasks_completed();
//
// n_threads - Number of threads executing the sub-tasks.
void all_tasks_completed(uint n_threads);

// Destructor.
~SubTasksDone();
|
||||
|
@ -381,9 +381,4 @@ template <> class Devirtualizer<false> {
|
||||
template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
|
||||
};
|
||||
|
||||
// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
|
||||
#define nvs_nv_to_bool true
|
||||
#define nvs_v_to_bool false
|
||||
#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool
|
||||
|
||||
#endif // SHARE_VM_MEMORY_ITERATOR_HPP
|
||||
|
@ -144,4 +144,36 @@ class ArrayKlass: public Klass {
|
||||
void oop_verify_on(oop obj, outputStream* st);
|
||||
};
|
||||
|
||||
// Array oop iteration macros for declarations.
|
||||
// Used to generate the declarations in the *ArrayKlass header files.
|
||||
|
||||
#define OOP_OOP_ITERATE_DECL_RANGE(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Named NO_BACKWARDS because the definition used by *ArrayKlass isn't reversed, see below.
|
||||
#define OOP_OOP_ITERATE_DECL_NO_BACKWARDS(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
// Array oop iteration macros for definitions.
|
||||
// Used to generate the definitions in the *ArrayKlass.inline.hpp files.
|
||||
|
||||
#define OOP_OOP_ITERATE_DEFN_RANGE(KlassType, OopClosureType, nv_suffix) \
|
||||
\
|
||||
int KlassType::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \
|
||||
return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix) \
|
||||
int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* No reverse implementation ATM. */ \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(KlassType, OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
#endif // SHARE_VM_OOPS_ARRAYKLASS_HPP
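A hypothetical instantiation of the NO_BACKWARDS definition macro, using (ExtendedOopClosure, _v) as the example pair: it still emits the oop_oop_iterate_backwards entry point the GC code expects, but the body simply forwards to the normal forward iterator, which is why the macro is named NO_BACKWARDS:

int ObjArrayKlass::oop_oop_iterate_backwards_v(oop obj, ExtendedOopClosure* closure) {
  // No reverse implementation ATM: forward iteration is used instead.
  return oop_oop_iterate<false>(obj, closure);
}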
|
||||
|
@ -87,19 +87,12 @@ public:
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
};
|
||||
|
@ -78,33 +78,9 @@ inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosure
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
OOP_OOP_ITERATE_DEFN( InstanceClassLoaderKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceClassLoaderKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceClassLoaderKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
|
||||
|
@ -1084,19 +1084,12 @@ class InstanceKlass: public Klass {
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
u2 idnum_allocated_count() const { return _idnum_allocated_count; }
|
||||
|
@ -27,6 +27,7 @@
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -187,29 +188,9 @@ INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closu
|
||||
|
||||
#undef INLINE
|
||||
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
OOP_OOP_ITERATE_DEFN( InstanceKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
|
||||
|
@ -149,19 +149,12 @@ class InstanceMirrorKlass: public InstanceKlass {
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
};
|
||||
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
@ -132,33 +133,9 @@ int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closur
|
||||
return oop_size(obj);
|
||||
}
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
OOP_OOP_ITERATE_DEFN( InstanceMirrorKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceMirrorKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceMirrorKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
|
||||
|
@ -119,19 +119,12 @@ private:
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock);
|
||||
|
@ -141,34 +141,9 @@ int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure,
|
||||
|
||||
// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
|
||||
// all closures. Macros calling macros above for each oop size.
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
OOP_OOP_ITERATE_DEFN( InstanceRefKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( InstanceRefKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceRefKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
|
||||
|
@ -583,20 +583,20 @@ protected:
|
||||
|
||||
// Iterators specialized to particular subtypes
|
||||
// of ExtendedOopClosure, to avoid closure virtual calls.
|
||||
#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0; \
|
||||
/* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */ \
|
||||
virtual int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) = 0;
|
||||
#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0; \
|
||||
/* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */ \
|
||||
virtual int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) = 0;
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
#define Klass_OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
virtual void array_klasses_do(void f(Klass* k)) {}
|
||||
@ -651,4 +651,44 @@ protected:
|
||||
void klass_update_barrier_set_pre(oop* p, oop v);
|
||||
};
|
||||
|
||||
// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
#define nvs_nv_to_bool true
#define nvs_v_to_bool false
#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool

// Oop iteration macros for declarations.
// Used to generate declarations in the *Klass header files.

#define OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
int oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr);

#if INCLUDE_ALL_GCS
#define OOP_OOP_ITERATE_DECL_BACKWARDS(OopClosureType, nv_suffix) \
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
#endif // INCLUDE_ALL_GCS


// Oop iteration macros for definitions.
// Used to generate definitions in the *Klass.inline.hpp files.

#define OOP_OOP_ITERATE_DEFN(KlassType, OopClosureType, nv_suffix) \
int KlassType::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
}

#if INCLUDE_ALL_GCS
#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix) \
int KlassType::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
}
#else
#define OOP_OOP_ITERATE_DEFN_BACKWARDS(KlassType, OopClosureType, nv_suffix)
#endif

#define OOP_OOP_ITERATE_DEFN_BOUNDED(KlassType, OopClosureType, nv_suffix) \
int KlassType::oop_oop_iterate_bounded##nv_suffix(oop obj, OopClosureType* closure, MemRegion mr) { \
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
}

#endif // SHARE_VM_OOPS_KLASS_HPP
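For illustration, one instantiation of the definition macro above, assuming (ExtendedOopClosure, _v) as the closure/suffix pair: nvs_to_bool(_v) pastes to nvs_v_to_bool, i.e. false, so OOP_OOP_ITERATE_DEFN(InstanceKlass, ExtendedOopClosure, _v) expands roughly to:

int InstanceKlass::oop_oop_iterate_v(oop obj, ExtendedOopClosure* closure) {
  return oop_oop_iterate<false>(obj, closure);  // nv == false: virtual do_oop calls
}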
|
||||
|
@ -163,22 +163,14 @@ class ObjArrayKlass : public ArrayKlass {
|
||||
|
||||
public:
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
|
||||
MemRegion mr); \
|
||||
int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \
|
||||
int start, int end);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// JVM support
|
||||
|
@ -27,6 +27,8 @@
|
||||
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "oops/arrayKlass.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "oops/objArrayOop.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
@ -149,41 +151,10 @@ int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int s
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
int ObjArrayKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* No reverse implementation ATM. */ \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \
|
||||
return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end); \
|
||||
}
|
||||
|
||||
|
||||
#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r( OopClosureType, nv_suffix)
|
||||
|
||||
#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN( ObjArrayKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( ObjArrayKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_RANGE( ObjArrayKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(ObjArrayKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
|
||||
|
@ -741,7 +741,7 @@ inline int oopDesc::oop_iterate(OopClosureType* blk) { \
|
||||
} \
|
||||
\
|
||||
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
|
||||
return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
|
||||
return klass()->oop_oop_iterate_bounded##nv_suffix(this, blk, mr); \
|
||||
}
|
||||
|
||||
|
||||
|
@ -92,24 +92,24 @@ class TypeArrayKlass : public ArrayKlass {
|
||||
// The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
|
||||
inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
|
||||
|
||||
// Wraps oop_oop_iterate_impl to conform to macros.
|
||||
template <bool nv, typename OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
// Wraps oop_oop_iterate_impl to conform to macros.
|
||||
template <bool nv, typename OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
public:
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, \
|
||||
MemRegion mr); \
|
||||
int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, \
|
||||
int start, int end);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_RANGE)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_RANGE)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_OOP_ITERATE_DECL_NO_BACKWARDS)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
|
@ -25,6 +25,8 @@
|
||||
#ifndef SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
||||
|
||||
#include "oops/arrayKlass.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/typeArrayKlass.hpp"
|
||||
#include "oops/typeArrayOop.hpp"
|
||||
@ -39,35 +41,19 @@ inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* clo
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
template <bool nv, typename OopClosureType>
|
||||
int TypeArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
|
||||
return oop_oop_iterate_impl(obj, closure);
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
template <bool nv, typename OopClosureType>
|
||||
int TypeArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
return oop_oop_iterate_impl(obj, closure);
|
||||
}
|
||||
|
||||
#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN( TypeArrayKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_BOUNDED( TypeArrayKlass, OopClosureType, nv_suffix) \
|
||||
OOP_OOP_ITERATE_DEFN_NO_BACKWARDS(TypeArrayKlass, OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
||||
|
@ -1278,10 +1278,8 @@ void Arguments::set_cms_and_parnew_gc_flags() {
|
||||
|
||||
// Preferred young gen size for "short" pauses:
// upper bound depends on # of threads and NewRatio.
const uintx parallel_gc_threads =
(ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
const size_t preferred_max_new_size_unaligned =
MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads));
MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
size_t preferred_max_new_size =
align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
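The removed parallel_gc_threads local only mapped ParallelGCThreads == 0 to 1; the new bound uses the flag directly. Worked numbers, purely illustrative and with ScaleForWordSize omitted for simplicity:

// max_heap = 4 GB, NewRatio = 2, young_gen_per_worker = 64 MB, ParallelGCThreads = 8:
// preferred_max_new_size_unaligned = MIN2(4 GB / (2 + 1), 64 MB * 8)
//                                  = MIN2(1365 MB, 512 MB) = 512 MB
// which is then aligned up to the OS page size.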
|
||||
|
||||
|
@ -261,7 +261,12 @@ bool ElfFile::specifies_noexecstack() {
|
||||
}
|
||||
}
|
||||
}
|
||||
// AARCH64 defaults to noexecstack. All others default to execstack.
#ifdef AARCH64
return true;
#else
return false;
#endif
}
#endif
|
||||
|
||||
|
@ -41,10 +41,32 @@ public class StableConfiguration {
|
||||
System.out.println("Server Compiler: " + get());
|
||||
}
|
||||
|
||||
// The method 'get' below returns true if the method is server compiled
// and is used by the Stable tests to determine whether methods in
// general are being server compiled or not as the -XX:+FoldStableValues
// option is only applicable to -server.
//
// On aarch64 we DeOptimize when patching. This means that when the
// method is compiled as a result of -Xcomp it DeOptimizes immediately.
// The result is that getMethodCompilationLevel returns 0. This means
// the method returns true based on java.vm.name.
//
// However when the tests are run with -XX:+TieredCompilation and
// -XX:TieredStopAtLevel=1 this fails because methods will always
// be client compiled.
//
// Solution is to add a simple method 'get1' which should never be
// DeOpted and use that to determine the compilation level instead.
static void get1() {
}


// ::get() is among immediately compiled methods.
static boolean get() {
try {
Method m = StableConfiguration.class.getDeclaredMethod("get");
get1();
Method m = StableConfiguration.class.getDeclaredMethod("get1");
int level = WB.getMethodCompilationLevel(m);
if (level > 0) {
return (level == 4);
|
||||
|
57
hotspot/test/serviceability/sa/TestClassLoaderStats.java
Normal file
@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
import jdk.test.lib.Platform;
|
||||
import jdk.test.lib.ProcessTools;
|
||||
import jdk.test.lib.OutputAnalyzer;
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @library /testlibrary
|
||||
* @build jdk.test.lib.*
|
||||
* @run main TestClassLoaderStats
|
||||
*/
|
||||
public class TestClassLoaderStats {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
if (!Platform.shouldSAAttach()) {
|
||||
System.out.println("SA attach not expected to work - test skipped.");
|
||||
return;
|
||||
}
|
||||
|
||||
ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
|
||||
"-XX:+UsePerfData",
|
||||
"sun.jvm.hotspot.tools.ClassLoaderStats",
|
||||
Integer.toString(ProcessTools.getProcessId()));
|
||||
OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
|
||||
System.out.println(output.getOutput());
|
||||
|
||||
output.shouldHaveExitValue(0);
|
||||
output.shouldContain("Debugger attached successfully.");
|
||||
// The class loader stats header needs to be presented in the output:
|
||||
output.shouldMatch("class_loader\\W+classes\\W+bytes\\W+parent_loader\\W+alive?\\W+type");
|
||||
output.stderrShouldNotMatch("[E|e]xception");
|
||||
output.stderrShouldNotMatch("[E|e]rror");
|
||||
}
|
||||
|
||||
}
|
55
hotspot/test/serviceability/sa/TestStackTrace.java
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
import jdk.test.lib.OutputAnalyzer;
|
||||
import jdk.test.lib.Platform;
|
||||
import jdk.test.lib.ProcessTools;
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @library /testlibrary
|
||||
* @build jdk.test.lib.*
|
||||
* @run main TestStackTrace
|
||||
*/
|
||||
public class TestStackTrace {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
if (!Platform.shouldSAAttach()) {
|
||||
System.out.println("SA attach not expected to work - test skipped.");
|
||||
return;
|
||||
}
|
||||
|
||||
ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
|
||||
"-XX:+UsePerfData",
|
||||
"sun.jvm.hotspot.tools.StackTrace",
|
||||
Integer.toString(ProcessTools.getProcessId()));
|
||||
OutputAnalyzer output = ProcessTools.executeProcess(processBuilder);
|
||||
System.out.println(output.getOutput());
|
||||
|
||||
output.shouldHaveExitValue(0);
|
||||
output.shouldContain("Debugger attached successfully.");
|
||||
output.stderrShouldNotMatch("[E|e]xception");
|
||||
output.stderrShouldNotMatch("[E|e]rror");
|
||||
}
|
||||
|
||||
}
|
@ -309,3 +309,4 @@ fd3281c400347088b36aeb16273aa679d53a81a4 jdk9-b63
|
||||
7de8d036ad0980d988d1b9b4b4e6be555d9fbf98 jdk9-b64
|
||||
ed94f3e7ba6bbfec0772de6d24e39543e13f6d88 jdk9-b65
|
||||
4fbcca8ab812198c7fb747ea7b213b6e404f36e9 jdk9-b66
|
||||
1abd45df5480a04bff98fba1851d66a5230e67d4 jdk9-b67
|
||||
|
@ -166,6 +166,16 @@ SUNWprivate_1.1 {
|
||||
Java_java_lang_Package_getSystemPackage0;
|
||||
Java_java_lang_Package_getSystemPackages0;
|
||||
Java_java_lang_ProcessEnvironment_environ;
|
||||
Java_java_lang_ProcessHandleImpl_getCurrentPid0;
|
||||
Java_java_lang_ProcessHandleImpl_parent0;
|
||||
Java_java_lang_ProcessHandleImpl_isAlive0;
|
||||
Java_java_lang_ProcessHandleImpl_getProcessPids0;
|
||||
Java_java_lang_ProcessHandleImpl_destroy0;
|
||||
Java_java_lang_ProcessHandleImpl_waitForProcessExit0;
|
||||
Java_java_lang_ProcessHandleImpl_00024Info_initIDs;
|
||||
Java_java_lang_ProcessHandleImpl_00024Info_info0;
|
||||
Java_java_lang_ProcessImpl_init;
|
||||
Java_java_lang_ProcessImpl_forkAndExec;
|
||||
Java_java_lang_reflect_Array_get;
|
||||
Java_java_lang_reflect_Array_getBoolean;
|
||||
Java_java_lang_reflect_Array_getByte;
|
||||
@ -214,10 +224,6 @@ SUNWprivate_1.1 {
|
||||
Java_java_lang_Throwable_fillInStackTrace;
|
||||
Java_java_lang_Throwable_getStackTraceDepth;
|
||||
Java_java_lang_Throwable_getStackTraceElement;
|
||||
Java_java_lang_ProcessImpl_init;
|
||||
Java_java_lang_ProcessImpl_waitForProcessExit;
|
||||
Java_java_lang_ProcessImpl_forkAndExec;
|
||||
Java_java_lang_ProcessImpl_destroyProcess;
|
||||
Java_java_nio_Bits_copyFromShortArray;
|
||||
Java_java_nio_Bits_copyToShortArray;
|
||||
Java_java_nio_Bits_copyFromIntArray;
|
||||
@ -277,7 +283,7 @@ SUNWprivate_1.1 {
|
||||
|
||||
Java_jdk_internal_jimage_concurrent_ConcurrentPReader_initIDs;
|
||||
Java_jdk_internal_jimage_concurrent_ConcurrentPReader_pread;
|
||||
|
||||
|
||||
# ZipFile.c needs this one
|
||||
throwFileNotFoundException;
|
||||
# zip_util.c needs this one
|
||||
|
@ -42,7 +42,7 @@ SUNWprivate_1.1 {
|
||||
Java_java_net_Inet4Address_init;
|
||||
Java_java_net_Inet6Address_init;
|
||||
Java_java_net_PlainDatagramSocketImpl_setTTL;
|
||||
Java_java_net_PlainDatagramSocketImpl_socketSetOption;
|
||||
Java_java_net_PlainDatagramSocketImpl_socketSetOption0;
|
||||
Java_java_net_PlainDatagramSocketImpl_bind0;
|
||||
Java_java_net_PlainSocketImpl_socketAccept;
|
||||
Java_java_net_DatagramPacket_init;
|
||||
@ -73,7 +73,7 @@ SUNWprivate_1.1 {
|
||||
Java_java_net_SocketOutputStream_init;
|
||||
Java_java_net_PlainDatagramSocketImpl_peek;
|
||||
Java_java_net_PlainDatagramSocketImpl_peekData;
|
||||
Java_java_net_PlainSocketImpl_socketSetOption;
|
||||
Java_java_net_PlainSocketImpl_socketSetOption0;
|
||||
Java_java_net_PlainSocketImpl_socketSendUrgentData;
|
||||
Java_java_net_PlainDatagramSocketImpl_datagramSocketCreate;
|
||||
Java_java_net_PlainSocketImpl_socketGetOption;
|
||||
|
@ -0,0 +1,401 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "jni.h"
|
||||
#include "jni_util.h"
|
||||
#include "java_lang_ProcessHandleImpl.h"
|
||||
#include "java_lang_ProcessHandleImpl_Info.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
|
||||
#include <sys/sysctl.h>
|
||||
|
||||
/**
|
||||
* Implementations of ProcessHandleImpl functions for MAC OS X;
|
||||
* are NOT common to all Unix variants.
|
||||
*/
|
||||
|
||||
static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t pid);
|
||||
static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid);
|
||||
|
||||
/*
|
||||
* Common Unix function to lookup the uid and return the user name.
|
||||
*/
|
||||
extern jstring uidToUser(JNIEnv* env, uid_t uid);
|
||||
|
||||
/* Field id for jString 'command' in java.lang.ProcessHandle.Info */
|
||||
static jfieldID ProcessHandleImpl_Info_commandID;
|
||||
|
||||
/* Field id for jString[] 'arguments' in java.lang.ProcessHandle.Info */
|
||||
static jfieldID ProcessHandleImpl_Info_argumentsID;
|
||||
|
||||
/* Field id for jlong 'totalTime' in java.lang.ProcessHandle.Info */
|
||||
static jfieldID ProcessHandleImpl_Info_totalTimeID;
|
||||
|
||||
/* Field id for jlong 'startTime' in java.lang.ProcessHandle.Info */
|
||||
static jfieldID ProcessHandleImpl_Info_startTimeID;
|
||||
|
||||
/* Field id for jString 'user' in java.lang.ProcessHandleImpl.Info */
|
||||
static jfieldID ProcessHandleImpl_Info_userID;
|
||||
|
||||
/* static value for clock ticks per second. */
|
||||
static long clock_ticks_per_second;
|
||||
|
||||
/**************************************************************
|
||||
* Static method to initialize field IDs and the ticks per second rate.
|
||||
*
|
||||
* Class: java_lang_ProcessHandleImpl_Info
|
||||
* Method: initIDs
|
||||
* Signature: ()V
|
||||
*/
|
||||
JNIEXPORT void JNICALL Java_java_lang_ProcessHandleImpl_00024Info_initIDs
|
||||
(JNIEnv *env, jclass clazz) {
|
||||
|
||||
CHECK_NULL(ProcessHandleImpl_Info_commandID =
|
||||
(*env)->GetFieldID(env, clazz, "command", "Ljava/lang/String;"));
|
||||
CHECK_NULL(ProcessHandleImpl_Info_argumentsID =
|
||||
(*env)->GetFieldID(env, clazz, "arguments", "[Ljava/lang/String;"));
|
||||
CHECK_NULL(ProcessHandleImpl_Info_totalTimeID =
|
||||
(*env)->GetFieldID(env, clazz, "totalTime", "J"));
|
||||
CHECK_NULL(ProcessHandleImpl_Info_startTimeID =
|
||||
(*env)->GetFieldID(env, clazz, "startTime", "J"));
|
||||
CHECK_NULL(ProcessHandleImpl_Info_userID =
|
||||
(*env)->GetFieldID(env, clazz, "user", "Ljava/lang/String;"));
|
||||
clock_ticks_per_second = sysconf(_SC_CLK_TCK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the parent pid of the requested pid.
|
||||
*
|
||||
* Class: java_lang_ProcessHandleImpl
|
||||
* Method: parent0
|
||||
* Signature: (J)J
|
||||
*/
|
||||
JNIEXPORT jlong JNICALL Java_java_lang_ProcessHandleImpl_parent0
|
||||
(JNIEnv *env, jobject obj, jlong jpid) {
|
||||
pid_t pid = (pid_t) jpid;
|
||||
pid_t ppid = -1;
|
||||
|
||||
if (pid == getpid()) {
|
||||
ppid = getppid();
|
||||
} else {
|
||||
const pid_t pid = (pid_t) jpid;
|
||||
struct kinfo_proc kp;
|
||||
size_t bufSize = sizeof kp;
|
||||
|
||||
// Read the process info for the specific pid
|
||||
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
|
||||
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
return -1;
|
||||
}
|
||||
ppid = (bufSize > 0 && kp.kp_proc.p_pid == pid) ? kp.kp_eproc.e_ppid : -1;
|
||||
}
|
||||
return (jlong) ppid;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the children of the requested pid and optionally each parent.
|
||||
*
|
||||
* Class: java_lang_ProcessHandleImpl
|
||||
* Method: getProcessPids0
|
||||
* Signature: (J[J[J)I
|
||||
*
|
||||
* Use sysctl to accumulate any process whose parent pid is zero or matches.
|
||||
* The resulting pids are stored into the array of longs.
|
||||
* The number of pids is returned if they all fit.
|
||||
* If the parentArray is non-null, store the parent pid.
|
||||
* If the array is too short, excess pids are not stored and
|
||||
* the desired length is returned.
|
||||
*/
|
||||
JNIEXPORT jint JNICALL Java_java_lang_ProcessHandleImpl_getProcessPids0
|
||||
(JNIEnv *env, jclass clazz, jlong jpid,
|
||||
jlongArray jarray, jlongArray jparentArray)
|
||||
{
|
||||
size_t count = 0;
|
||||
jlong* pids = NULL;
|
||||
jlong* ppids = NULL;
|
||||
size_t parentArraySize = 0;
|
||||
size_t arraySize = 0;
|
||||
size_t bufSize = 0;
|
||||
pid_t pid = (pid_t) jpid;
|
||||
|
||||
arraySize = (*env)->GetArrayLength(env, jarray);
|
||||
JNU_CHECK_EXCEPTION_RETURN(env, -1);
|
||||
if (jparentArray != NULL) {
|
||||
parentArraySize = (*env)->GetArrayLength(env, jparentArray);
|
||||
JNU_CHECK_EXCEPTION_RETURN(env, -1);
|
||||
|
||||
if (arraySize != parentArraySize) {
|
||||
JNU_ThrowIllegalArgumentException(env, "array sizes not equal");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Get buffer size needed to read all processes
|
||||
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0};
|
||||
if (sysctl(mib, 4, NULL, &bufSize, NULL, 0) < 0) {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Allocate buffer big enough for all processes
|
||||
void *buffer = malloc(bufSize);
|
||||
if (buffer == NULL) {
|
||||
JNU_ThrowOutOfMemoryError(env, "malloc failed");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Read process info for all processes
|
||||
if (sysctl(mib, 4, buffer, &bufSize, NULL, 0) < 0) {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
free(buffer);
|
||||
return -1;
|
||||
}
|
||||
|
||||
do { // Block to break out of on Exception
|
||||
struct kinfo_proc *kp = (struct kinfo_proc *) buffer;
|
||||
unsigned long nentries = bufSize / sizeof (struct kinfo_proc);
|
||||
long i;
|
||||
|
||||
pids = (*env)->GetLongArrayElements(env, jarray, NULL);
|
||||
if (pids == NULL) {
|
||||
break;
|
||||
}
|
||||
if (jparentArray != NULL) {
|
||||
ppids = (*env)->GetLongArrayElements(env, jparentArray, NULL);
|
||||
if (ppids == NULL) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Process each entry in the buffer
|
||||
for (i = nentries; --i >= 0; ++kp) {
|
||||
if (pid == 0 || kp->kp_eproc.e_ppid == pid) {
|
||||
if (count < arraySize) {
|
||||
// Only store if it fits
|
||||
pids[count] = (jlong) kp->kp_proc.p_pid;
|
||||
if (ppids != NULL) {
|
||||
// Store the parentPid
|
||||
ppids[count] = (jlong) kp->kp_eproc.e_ppid;
|
||||
}
|
||||
}
|
||||
count++; // Count to tabulate size needed
|
||||
}
|
||||
}
|
||||
} while (0);
|
||||
|
||||
if (pids != NULL) {
|
||||
(*env)->ReleaseLongArrayElements(env, jarray, pids, 0);
|
||||
}
|
||||
if (ppids != NULL) {
|
||||
(*env)->ReleaseLongArrayElements(env, jparentArray, ppids, 0);
|
||||
}
|
||||
|
||||
free(buffer);
|
||||
// If more pids than array had size for; count will be greater than array size
|
||||
return count;
|
||||
}
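A minimal standalone restatement of the two-call sysctl sizing pattern used above, assuming the same <sys/sysctl.h> environment as this file and with error handling trimmed: the first call reports the required buffer size, the second fills the buffer.

int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0};
size_t bufSize = 0;
if (sysctl(mib, 4, NULL, &bufSize, NULL, 0) == 0) {
    struct kinfo_proc *procs = (struct kinfo_proc *) malloc(bufSize);
    if (procs != NULL && sysctl(mib, 4, procs, &bufSize, NULL, 0) == 0) {
        size_t nentries = bufSize / sizeof(struct kinfo_proc);
        // walk procs[0..nentries) exactly as the loop above does
    }
    free(procs);
}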
|
||||
|
||||
/**************************************************************
|
||||
* Implementation of ProcessHandleImpl_Info native methods.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Fill in the Info object from the OS information about the process.
|
||||
*
|
||||
* Class: java_lang_ProcessHandleImpl
|
||||
* Method: info0
|
||||
* Signature: (J)I
|
||||
*/
|
||||
JNIEXPORT void JNICALL Java_java_lang_ProcessHandleImpl_00024Info_info0
|
||||
(JNIEnv *env, jobject jinfo, jlong jpid) {
|
||||
pid_t pid = (pid_t) jpid;
|
||||
getStatInfo(env, jinfo, pid);
|
||||
getCmdlineInfo(env, jinfo, pid);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read /proc/<pid>/stat and fill in the fields of the Info object.
|
||||
* The executable name, plus the user, system, and start times are gathered.
|
||||
*/
|
||||
static void getStatInfo(JNIEnv *env, jobject jinfo, pid_t jpid) {
|
||||
jlong totalTime; // nanoseconds
|
||||
unsigned long long startTime; // microseconds
|
||||
|
||||
const pid_t pid = (pid_t) jpid;
|
||||
struct kinfo_proc kp;
|
||||
size_t bufSize = sizeof kp;
|
||||
|
||||
// Read the process info for the specific pid
|
||||
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
|
||||
|
||||
if (sysctl(mib, 4, &kp, &bufSize, NULL, 0) < 0) {
|
||||
if (errno == EINVAL) {
|
||||
return;
|
||||
} else {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Convert the UID to the username
|
||||
jstring name = NULL;
|
||||
CHECK_NULL((name = uidToUser(env, kp.kp_eproc.e_ucred.cr_uid)));
|
||||
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_userID, name);
|
||||
JNU_CHECK_EXCEPTION(env);
|
||||
|
||||
startTime = kp.kp_proc.p_starttime.tv_sec * 1000 +
|
||||
kp.kp_proc.p_starttime.tv_usec / 1000;
|
||||
|
||||
(*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_startTimeID, startTime);
|
||||
JNU_CHECK_EXCEPTION(env);
|
||||
|
||||
// Get cputime if for current process
|
||||
if (pid == getpid()) {
|
||||
struct rusage usage;
|
||||
if (getrusage(RUSAGE_SELF, &usage) != 0) {
|
||||
return;
|
||||
}
|
||||
jlong microsecs =
|
||||
usage.ru_utime.tv_sec * 1000 * 1000 + usage.ru_utime.tv_usec +
|
||||
usage.ru_stime.tv_sec * 1000 * 1000 + usage.ru_stime.tv_usec;
|
||||
totalTime = microsecs * 1000;
|
||||
(*env)->SetLongField(env, jinfo, ProcessHandleImpl_Info_totalTimeID, totalTime);
|
||||
JNU_CHECK_EXCEPTION(env);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct the argument array by parsing the arguments from the sequence of arguments.
|
||||
*/
|
||||
static int fillArgArray(JNIEnv *env, jobject jinfo, int nargs,
|
||||
const char *cp, const char *argsEnd) {
|
||||
jstring str = NULL;
|
||||
jobject argsArray;
|
||||
int i;
|
||||
|
||||
if (nargs < 1) {
|
||||
return 0;
|
||||
}
|
||||
// Create a String array for nargs-1 elements
|
||||
CHECK_NULL_RETURN((argsArray = (*env)->NewObjectArray(env,
|
||||
nargs - 1, JNU_ClassString(env), NULL)), -1);
|
||||
|
||||
for (i = 0; i < nargs - 1; i++) {
|
||||
// skip to the next argument; omits arg[0]
|
||||
cp += strnlen(cp, (argsEnd - cp)) + 1;
|
||||
|
||||
if (cp > argsEnd || *cp == '\0') {
|
||||
return -2; // Off the end pointer or an empty argument is an error
|
||||
}
|
||||
|
||||
CHECK_NULL_RETURN((str = JNU_NewStringPlatform(env, cp)), -1);
|
||||
|
||||
(*env)->SetObjectArrayElement(env, argsArray, i, str);
|
||||
JNU_CHECK_EXCEPTION_RETURN(env, -3);
|
||||
}
|
||||
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_argumentsID, argsArray);
|
||||
JNU_CHECK_EXCEPTION_RETURN(env, -4);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the command and arguments for the process and store them
|
||||
* into the Info object.
|
||||
*/
|
||||
static void getCmdlineInfo(JNIEnv *env, jobject jinfo, pid_t pid) {
|
||||
int mib[3], maxargs, nargs, i;
|
||||
size_t size;
|
||||
char *args, *cp, *sp, *np;
|
||||
|
||||
// Get the maximum size of the arguments
|
||||
mib[0] = CTL_KERN;
|
||||
mib[1] = KERN_ARGMAX;
|
||||
size = sizeof(maxargs);
|
||||
if (sysctl(mib, 2, &maxargs, &size, NULL, 0) == -1) {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
return;
|
||||
}
|
||||
|
||||
// Allocate an args buffer and get the arguments
|
||||
args = (char *)malloc(maxargs);
|
||||
if (args == NULL) {
|
||||
JNU_ThrowOutOfMemoryError(env, "malloc failed");
|
||||
return;
|
||||
}
|
||||
|
||||
do { // a block to break out of on error
|
||||
char *argsEnd;
|
||||
jstring str = NULL;
|
||||
|
||||
mib[0] = CTL_KERN;
|
||||
mib[1] = KERN_PROCARGS2;
|
||||
mib[2] = pid;
|
||||
size = (size_t) maxargs;
|
||||
if (sysctl(mib, 3, args, &size, NULL, 0) == -1) {
|
||||
if (errno != EINVAL) {
|
||||
JNU_ThrowByNameWithLastError(env,
|
||||
"java/lang/RuntimeException", "sysctl failed");
|
||||
}
|
||||
break;
|
||||
}
|
||||
memcpy(&nargs, args, sizeof(nargs));
|
||||
|
||||
cp = &args[sizeof(nargs)]; // Strings start after nargs
|
||||
argsEnd = &args[size];
|
||||
|
||||
// Store the command executable path
|
||||
if ((str = JNU_NewStringPlatform(env, cp)) == NULL) {
|
||||
break;
|
||||
}
|
||||
(*env)->SetObjectField(env, jinfo, ProcessHandleImpl_Info_commandID, str);
|
||||
if ((*env)->ExceptionCheck(env)) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Skip trailing nulls after the executable path
|
||||
for (cp = cp + strnlen(cp, argsEnd - cp); cp < argsEnd; cp++) {
|
||||
if (*cp != '\0') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fillArgArray(env, jinfo, nargs, cp, argsEnd);
|
||||
} while (0);
|
||||
// Free the arg buffer
|
||||
free(args);
|
||||
}
|
||||
|
@ -25,6 +25,7 @@
|
||||
|
||||
package java.io;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
@ -50,7 +51,7 @@ public abstract class InputStream implements Closeable {
|
||||
// use when skipping.
|
||||
private static final int MAX_SKIP_BUFFER_SIZE = 2048;
|
||||
|
||||
private static final int TRANSFER_BUFFER_SIZE = 8192;
|
||||
private static final int DEFAULT_BUFFER_SIZE = 8192;
|
||||
|
||||
/**
|
||||
* Reads the next byte of data from the input stream. The value byte is
|
||||
@ -191,6 +192,128 @@ public abstract class InputStream implements Closeable {
|
||||
return i;
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum size of array to allocate.
|
||||
* Some VMs reserve some header words in an array.
|
||||
* Attempts to allocate larger arrays may result in
|
||||
* OutOfMemoryError: Requested array size exceeds VM limit
|
||||
*/
|
||||
private static final int MAX_BUFFER_SIZE = Integer.MAX_VALUE - 8;
|
||||
|
||||
/**
|
||||
* Reads all remaining bytes from the input stream. This method blocks until
|
||||
* all remaining bytes have been read and end of stream is detected, or an
|
||||
* exception is thrown. This method does not close the input stream.
|
||||
*
|
||||
* <p> When this stream reaches end of stream, further invocations of this
|
||||
* method will return an empty byte array.
|
||||
*
|
||||
* <p> Note that this method is intended for simple cases where it is
|
||||
* convenient to read all bytes into a byte array. It is not intended for
|
||||
* reading input streams with large amounts of data.
|
||||
*
|
||||
* <p> The behavior for the case where the input stream is <i>asynchronously
|
||||
* closed</i>, or the thread interrupted during the read, is highly input
|
||||
* stream specific, and therefore not specified.
|
||||
*
|
||||
* <p> If an I/O error occurs reading from the input stream, then it may do
|
||||
* so after some, but not all, bytes have been read. Consequently the input
|
||||
* stream may not be at end of stream and may be in an inconsistent state.
|
||||
* It is strongly recommended that the stream be promptly closed if an I/O
|
||||
* error occurs.
|
||||
*
|
||||
* @return a byte array containing the bytes read from this input stream
|
||||
* @throws IOException if an I/O error occurs
|
||||
* @throws OutOfMemoryError if an array of the required size cannot be
|
||||
* allocated. For example, if an array larger than {@code 2GB} would
|
||||
* be required to store the bytes.
|
||||
*
|
||||
* @since 1.9
|
||||
*/
|
||||
    public byte[] readAllBytes() throws IOException {
        byte[] buf = new byte[DEFAULT_BUFFER_SIZE];
        int capacity = buf.length;
        int nread = 0;
        int n;
        for (;;) {
            // read to EOF which may read more or less than initial buffer size
            while ((n = read(buf, nread, capacity - nread)) > 0)
                nread += n;

            // if the last call to read returned -1, then we're done
            if (n < 0)
                break;

            // need to allocate a larger buffer
            if (capacity <= MAX_BUFFER_SIZE - capacity) {
                capacity = capacity << 1;
            } else {
                if (capacity == MAX_BUFFER_SIZE)
                    throw new OutOfMemoryError("Required array size too large");
                capacity = MAX_BUFFER_SIZE;
            }
            buf = Arrays.copyOf(buf, capacity);
        }
        return (capacity == nread) ? buf : Arrays.copyOf(buf, nread);
    }
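A minimal usage sketch of the new method, assuming a JDK with readAllBytes available; the file name is only a placeholder.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ReadAllBytesDemo {
    public static void main(String[] args) throws IOException {
        // Read an entire stream into memory; suitable only for modestly sized inputs.
        try (InputStream in = Files.newInputStream(Paths.get("example.txt"))) {
            byte[] data = in.readAllBytes();
            System.out.println("read " + data.length + " bytes");
        }
    }
}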
|
||||
/**
|
||||
* Reads the requested number of bytes from the input stream into the given
|
||||
* byte array. This method blocks until {@code len} bytes of input data have
|
||||
* been read, end of stream is detected, or an exception is thrown. The
|
||||
* number of bytes actually read, possibly zero, is returned. This method
|
||||
* does not close the input stream.
|
||||
*
|
||||
* <p> In the case where end of stream is reached before {@code len} bytes
|
||||
* have been read, then the actual number of bytes read will be returned.
|
||||
* When this stream reaches end of stream, further invocations of this
|
||||
* method will return zero.
|
||||
*
|
||||
* <p> If {@code len} is zero, then no bytes are read and {@code 0} is
|
||||
* returned; otherwise, there is an attempt to read up to {@code len} bytes.
|
||||
*
|
||||
* <p> The first byte read is stored into element {@code b[off]}, the next
|
||||
* one in to {@code b[off+1]}, and so on. The number of bytes read is, at
|
||||
* most, equal to {@code len}. Let <i>k</i> be the number of bytes actually
|
||||
* read; these bytes will be stored in elements {@code b[off]} through
|
||||
* {@code b[off+}<i>k</i>{@code -1]}, leaving elements {@code b[off+}<i>k</i>
|
||||
* {@code ]} through {@code b[off+len-1]} unaffected.
|
||||
*
|
||||
* <p> The behavior for the case where the input stream is <i>asynchronously
|
||||
* closed</i>, or the thread interrupted during the read, is highly input
|
||||
* stream specific, and therefore not specified.
|
||||
*
|
||||
* <p> If an I/O error occurs reading from the input stream, then it may do
|
||||
* so after some, but not all, bytes of {@code b} have been updated with
|
||||
* data from the input stream. Consequently the input stream and {@code b}
|
||||
* may be in an inconsistent state. It is strongly recommended that the
|
||||
* stream be promptly closed if an I/O error occurs.
|
||||
*
|
||||
* @param b the byte array into which the data is read
|
||||
* @param off the start offset in {@code b} at which the data is written
|
||||
* @param len the maximum number of bytes to read
|
||||
* @return the actual number of bytes read into the buffer
|
||||
* @throws IOException if an I/O error occurs
|
||||
* @throws NullPointerException if {@code b} is {@code null}
|
||||
* @throws IndexOutOfBoundsException If {@code off} is negative, {@code len}
|
||||
* is negative, or {@code len} is greater than {@code b.length - off}
|
||||
*
|
||||
* @since 1.9
|
||||
*/
|
||||
    public int readNBytes(byte[] b, int off, int len) throws IOException {
        Objects.requireNonNull(b);
        if (off < 0 || len < 0 || len > b.length - off)
            throw new IndexOutOfBoundsException();
        int n = 0;
        while (n < len) {
            int count = read(b, off + n, len - n);
            if (count < 0)
                break;
            n += count;
        }
        return n;
    }
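A minimal sketch of readNBytes reading a fixed-size header and checking the returned count, since fewer bytes come back when end of stream is reached first; the 16-byte header length is arbitrary.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadNBytesDemo {
    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3, 4, 5});
        byte[] header = new byte[16];
        int n = in.readNBytes(header, 0, header.length);
        if (n < header.length) {
            System.out.println("short read: only " + n + " bytes before end of stream");
        }
    }
}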
|
||||
/**
|
||||
* Skips over and discards <code>n</code> bytes of data from this input
|
||||
* stream. The <code>skip</code> method may, for a variety of reasons, end
|
||||
@ -396,9 +519,9 @@ public abstract class InputStream implements Closeable {
|
||||
public long transferTo(OutputStream out) throws IOException {
|
||||
Objects.requireNonNull(out, "out");
|
||||
long transferred = 0;
|
||||
byte[] buffer = new byte[TRANSFER_BUFFER_SIZE];
|
||||
byte[] buffer = new byte[DEFAULT_BUFFER_SIZE];
|
||||
int read;
|
||||
while ((read = this.read(buffer, 0, TRANSFER_BUFFER_SIZE)) >= 0) {
|
||||
while ((read = this.read(buffer, 0, DEFAULT_BUFFER_SIZE)) >= 0) {
|
||||
out.write(buffer, 0, read);
|
||||
transferred += read;
|
||||
}
|
||||
|
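The buffer constant is the only change here; usage stays the same. A minimal sketch of transferTo copying a stream into a ByteArrayOutputStream:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class TransferToDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayInputStream in =
                new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        long copied = in.transferTo(out);   // copies until end of stream
        System.out.println(copied + " bytes copied");
    }
}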
@ -253,9 +253,6 @@ public class ObjectInputStream
|
||||
/** flag set when at end of field value block with no TC_ENDBLOCKDATA */
|
||||
private boolean defaultDataEnd = false;
|
||||
|
||||
/** buffer for reading primitive field values */
|
||||
private byte[] primVals;
|
||||
|
||||
/** if true, invoke readObjectOverride() instead of readObject() */
|
||||
private final boolean enableOverride;
|
||||
/** if true, invoke resolveObject() */
|
||||
@ -500,7 +497,11 @@ public class ObjectInputStream
|
||||
Object curObj = ctx.getObj();
|
||||
ObjectStreamClass curDesc = ctx.getDesc();
|
||||
bin.setBlockDataMode(false);
|
||||
defaultReadFields(curObj, curDesc);
|
||||
FieldValues vals = defaultReadFields(curObj, curDesc);
|
||||
if (curObj != null) {
|
||||
defaultCheckFieldValues(curObj, curDesc, vals);
|
||||
defaultSetFieldValues(curObj, curDesc, vals);
|
||||
}
|
||||
bin.setBlockDataMode(true);
|
||||
if (!curDesc.hasWriteObjectData()) {
|
||||
/*
|
||||
@ -1881,6 +1882,26 @@ public class ObjectInputStream
|
||||
throws IOException
|
||||
{
|
||||
ObjectStreamClass.ClassDataSlot[] slots = desc.getClassDataLayout();
|
||||
// Best effort Failure Atomicity; slotValues will be non-null if field
|
||||
// values can be set after reading all field data in the hierarchy.
|
||||
// Field values can only be set after reading all data if there are no
|
||||
// user observable methods in the hierarchy, readObject(NoData). The
|
||||
// top most Serializable class in the hierarchy can be skipped.
|
||||
FieldValues[] slotValues = null;
|
||||
|
||||
boolean hasSpecialReadMethod = false;
|
||||
for (int i = 1; i < slots.length; i++) {
|
||||
ObjectStreamClass slotDesc = slots[i].desc;
|
||||
if (slotDesc.hasReadObjectMethod()
|
||||
|| slotDesc.hasReadObjectNoDataMethod()) {
|
||||
hasSpecialReadMethod = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// No special read methods, can store values and defer setting.
|
||||
if (!hasSpecialReadMethod)
|
||||
slotValues = new FieldValues[slots.length];
|
||||
|
||||
for (int i = 0; i < slots.length; i++) {
|
||||
ObjectStreamClass slotDesc = slots[i].desc;
|
||||
|
||||
@ -1917,7 +1938,13 @@ public class ObjectInputStream
|
||||
*/
|
||||
defaultDataEnd = false;
|
||||
} else {
|
||||
defaultReadFields(obj, slotDesc);
|
||||
FieldValues vals = defaultReadFields(obj, slotDesc);
|
||||
if (slotValues != null) {
|
||||
slotValues[i] = vals;
|
||||
} else if (obj != null) {
|
||||
defaultCheckFieldValues(obj, slotDesc, vals);
|
||||
defaultSetFieldValues(obj, slotDesc, vals);
|
||||
}
|
||||
}
|
||||
if (slotDesc.hasWriteObjectData()) {
|
||||
skipCustomData();
|
||||
@ -1933,6 +1960,19 @@ public class ObjectInputStream
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (obj != null && slotValues != null) {
|
||||
// Check that the non-primitive types are assignable for all slots
|
||||
// before assigning.
|
||||
for (int i = 0; i < slots.length; i++) {
|
||||
if (slotValues[i] != null)
|
||||
defaultCheckFieldValues(obj, slots[i].desc, slotValues[i]);
|
||||
}
|
||||
for (int i = 0; i < slots.length; i++) {
|
||||
if (slotValues[i] != null)
|
||||
defaultSetFieldValues(obj, slots[i].desc, slotValues[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1964,12 +2004,22 @@ public class ObjectInputStream
|
||||
}
|
||||
}
|
||||
|
||||
    private class FieldValues {
        final byte[] primValues;
        final Object[] objValues;

        FieldValues(byte[] primValues, Object[] objValues) {
            this.primValues = primValues;
            this.objValues = objValues;
        }
    }
|
||||
|
||||
/**
|
||||
* Reads in values of serializable fields declared by given class
|
||||
* descriptor. If obj is non-null, sets field values in obj. Expects that
|
||||
* passHandle is set to obj's handle before this method is called.
|
||||
* descriptor. Expects that passHandle is set to obj's handle before this
|
||||
* method is called.
|
||||
*/
|
||||
private void defaultReadFields(Object obj, ObjectStreamClass desc)
|
||||
private FieldValues defaultReadFields(Object obj, ObjectStreamClass desc)
|
||||
throws IOException
|
||||
{
|
||||
Class<?> cl = desc.forClass();
|
||||
@ -1977,22 +2027,19 @@ public class ObjectInputStream
|
||||
throw new ClassCastException();
|
||||
}
|
||||
|
||||
byte[] primVals = null;
|
||||
int primDataSize = desc.getPrimDataSize();
|
||||
if (primDataSize > 0) {
|
||||
if (primVals == null || primVals.length < primDataSize) {
|
||||
primVals = new byte[primDataSize];
|
||||
}
|
||||
primVals = new byte[primDataSize];
|
||||
bin.readFully(primVals, 0, primDataSize, false);
|
||||
if (obj != null) {
|
||||
desc.setPrimFieldValues(obj, primVals);
|
||||
}
|
||||
}
|
||||
|
||||
Object[] objVals = null;
|
||||
int numObjFields = desc.getNumObjFields();
|
||||
if (numObjFields > 0) {
|
||||
int objHandle = passHandle;
|
||||
ObjectStreamField[] fields = desc.getFields(false);
|
||||
Object[] objVals = new Object[numObjFields];
|
||||
objVals = new Object[numObjFields];
|
||||
int numPrimFields = fields.length - objVals.length;
|
||||
for (int i = 0; i < objVals.length; i++) {
|
||||
ObjectStreamField f = fields[numPrimFields + i];
|
||||
@ -2001,11 +2048,30 @@ public class ObjectInputStream
|
||||
handles.markDependency(objHandle, passHandle);
|
||||
}
|
||||
}
|
||||
if (obj != null) {
|
||||
desc.setObjFieldValues(obj, objVals);
|
||||
}
|
||||
passHandle = objHandle;
|
||||
}
|
||||
|
||||
return new FieldValues(primVals, objVals);
|
||||
}
|
||||
|
||||
/** Throws ClassCastException if any value is not assignable. */
|
||||
private void defaultCheckFieldValues(Object obj, ObjectStreamClass desc,
|
||||
FieldValues values) {
|
||||
Object[] objectValues = values.objValues;
|
||||
if (objectValues != null)
|
||||
desc.checkObjFieldValueTypes(obj, objectValues);
|
||||
}
|
||||
|
||||
/** Sets field values in obj. */
|
||||
private void defaultSetFieldValues(Object obj, ObjectStreamClass desc,
|
||||
FieldValues values) {
|
||||
byte[] primValues = values.primValues;
|
||||
Object[] objectValues = values.objValues;
|
||||
|
||||
if (primValues != null)
|
||||
desc.setPrimFieldValues(obj, primValues);
|
||||
if (objectValues != null)
|
||||
desc.setObjFieldValues(obj, objectValues);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1252,6 +1252,15 @@ public class ObjectStreamClass implements Serializable {
|
||||
fieldRefl.getObjFieldValues(obj, vals);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks that the given values, from array vals starting at offset 0,
|
||||
* are assignable to the given serializable object fields.
|
||||
* @throws ClassCastException if any value is not assignable
|
||||
*/
|
||||
void checkObjFieldValueTypes(Object obj, Object[] vals) {
|
||||
fieldRefl.checkObjectFieldValueTypes(obj, vals);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the serializable object fields of object obj using values from
|
||||
* array vals starting at offset 0. It is the responsibility of the caller
|
||||
@ -2069,6 +2078,15 @@ public class ObjectStreamClass implements Serializable {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks that the given values, from array vals starting at offset 0,
|
||||
* are assignable to the given serializable object fields.
|
||||
* @throws ClassCastException if any value is not assignable
|
||||
*/
|
||||
void checkObjectFieldValueTypes(Object obj, Object[] vals) {
|
||||
setObjFieldValues(obj, vals, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the serializable object fields of object obj using values from
|
||||
* array vals starting at offset 0. The caller is responsible for
|
||||
@ -2077,6 +2095,10 @@ public class ObjectStreamClass implements Serializable {
|
||||
* ClassCastException.
|
||||
*/
|
||||
void setObjFieldValues(Object obj, Object[] vals) {
|
||||
setObjFieldValues(obj, vals, false);
|
||||
}
|
||||
|
||||
private void setObjFieldValues(Object obj, Object[] vals, boolean dryRun) {
|
||||
if (obj == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
@ -2101,7 +2123,8 @@ public class ObjectStreamClass implements Serializable {
|
||||
f.getType().getName() + " in instance of " +
|
||||
obj.getClass().getName());
|
||||
}
|
||||
unsafe.putObject(obj, key, val);
|
||||
if (!dryRun)
|
||||
unsafe.putObject(obj, key, val);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -646,13 +646,11 @@ class Character implements java.io.Serializable, Comparable<Character> {
|
||||
*/
|
||||
public static final class UnicodeBlock extends Subset {
|
||||
/**
|
||||
* 510 - the expected number of enteties
|
||||
* 510 - the expected number of entities
|
||||
* 0.75 - the default load factor of HashMap
|
||||
*/
|
||||
private static final int INITIAL_CAPACITY =
|
||||
(int)(510 / 0.75f + 1.0f);
|
||||
private static Map<String, UnicodeBlock> map =
|
||||
new HashMap<>(INITIAL_CAPACITY);
|
||||
new HashMap<>((int)(510 / 0.75f + 1.0f));
|
||||
|
||||
/**
|
||||
* Creates a UnicodeBlock with the given identifier name.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,25 +26,31 @@
|
||||
package java.lang;
|
||||
|
||||
import java.io.*;
|
||||
import java.lang.ProcessBuilder.Redirect;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.ForkJoinPool;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* {@code Process} provides control of native processes started by
|
||||
* ProcessBuilder.start and Runtime.exec.
|
||||
* The class provides methods for performing input from the process, performing
|
||||
* output to the process, waiting for the process to complete,
|
||||
* checking the exit status of the process, and destroying (killing)
|
||||
* the process.
|
||||
* The {@link ProcessBuilder#start()} and
|
||||
* {@link Runtime#exec(String[],String[],File) Runtime.exec}
|
||||
* methods create a native process and return an instance of a
|
||||
* subclass of {@code Process} that can be used to control the process
|
||||
* and obtain information about it. The class {@code Process}
|
||||
* provides methods for performing input from the process, performing
|
||||
* output to the process, waiting for the process to complete,
|
||||
* checking the exit status of the process, and destroying (killing)
|
||||
* the process.
|
||||
* and obtain information about it.
|
||||
*
|
||||
* <p>The methods that create processes may not work well for special
|
||||
* processes on certain native platforms, such as native windowing
|
||||
* processes, daemon processes, Win16/DOS processes on Microsoft
|
||||
* Windows, or shell scripts.
|
||||
*
|
||||
* <p>By default, the created subprocess does not have its own terminal
|
||||
* <p>By default, the created process does not have its own terminal
|
||||
* or console. All its standard I/O (i.e. stdin, stdout, stderr)
|
||||
* operations will be redirected to the parent process, where they can
|
||||
* be accessed via the streams obtained using the methods
|
||||
@ -52,35 +58,49 @@ import java.util.concurrent.TimeUnit;
|
||||
* {@link #getInputStream()}, and
|
||||
* {@link #getErrorStream()}.
|
||||
* The parent process uses these streams to feed input to and get output
|
||||
* from the subprocess. Because some native platforms only provide
|
||||
* from the process. Because some native platforms only provide
|
||||
* limited buffer size for standard input and output streams, failure
|
||||
* to promptly write the input stream or read the output stream of
|
||||
* the subprocess may cause the subprocess to block, or even deadlock.
|
||||
* the process may cause the process to block, or even deadlock.
|
||||
*
|
||||
* <p>Where desired, <a href="ProcessBuilder.html#redirect-input">
|
||||
* subprocess I/O can also be redirected</a>
|
||||
* process I/O can also be redirected</a>
|
||||
* using methods of the {@link ProcessBuilder} class.
|
||||
*
|
||||
* <p>The subprocess is not killed when there are no more references to
|
||||
* the {@code Process} object, but rather the subprocess
|
||||
* <p>The process is not killed when there are no more references to
|
||||
* the {@code Process} object, but rather the process
|
||||
* continues executing asynchronously.
|
||||
*
|
||||
* <p>There is no requirement that a process represented by a {@code
|
||||
* <p>There is no requirement that the process represented by a {@code
|
||||
* Process} object execute asynchronously or concurrently with respect
|
||||
* to the Java process that owns the {@code Process} object.
|
||||
*
|
||||
* <p>As of 1.5, {@link ProcessBuilder#start()} is the preferred way
|
||||
* to create a {@code Process}.
|
||||
*
|
||||
* <p>Subclasses of Process should override the {@link #onExit()} and
|
||||
* {@link #toHandle()} methods to provide a fully functional Process including the
|
||||
* {@link #getPid() process id},
|
||||
* {@link #info() information about the process},
|
||||
* {@link #children() direct children}, and
|
||||
* {@link #allChildren() direct and indirect children} of the process.
|
||||
* Delegating to the underlying Process or ProcessHandle is typically
|
||||
* easiest and most efficient.
|
||||
*
|
||||
* @since 1.0
|
||||
*/
|
||||
public abstract class Process {
|
||||
/**
|
||||
* Default constructor for Process.
|
||||
*/
|
||||
public Process() {}
|
||||
|
||||
/**
|
||||
* Returns the output stream connected to the normal input of the
|
||||
* subprocess. Output to the stream is piped into the standard
|
||||
* process. Output to the stream is piped into the standard
|
||||
* input of the process represented by this {@code Process} object.
|
||||
*
|
||||
* <p>If the standard input of the subprocess has been redirected using
|
||||
* <p>If the standard input of the process has been redirected using
|
||||
* {@link ProcessBuilder#redirectInput(Redirect)
|
||||
* ProcessBuilder.redirectInput}
|
||||
* then this method will return a
|
||||
@ -90,42 +110,42 @@ public abstract class Process {
|
||||
* output stream to be buffered.
|
||||
*
|
||||
* @return the output stream connected to the normal input of the
|
||||
* subprocess
|
||||
* process
|
||||
*/
|
||||
public abstract OutputStream getOutputStream();
|
||||
|
||||
/**
|
||||
* Returns the input stream connected to the normal output of the
|
||||
* subprocess. The stream obtains data piped from the standard
|
||||
* process. The stream obtains data piped from the standard
|
||||
* output of the process represented by this {@code Process} object.
|
||||
*
|
||||
* <p>If the standard output of the subprocess has been redirected using
|
||||
* <p>If the standard output of the process has been redirected using
|
||||
* {@link ProcessBuilder#redirectOutput(Redirect)
|
||||
* ProcessBuilder.redirectOutput}
|
||||
* then this method will return a
|
||||
* <a href="ProcessBuilder.html#redirect-output">null input stream</a>.
|
||||
*
|
||||
* <p>Otherwise, if the standard error of the subprocess has been
|
||||
* <p>Otherwise, if the standard error of the process has been
|
||||
* redirected using
|
||||
* {@link ProcessBuilder#redirectErrorStream(boolean)
|
||||
* ProcessBuilder.redirectErrorStream}
|
||||
* then the input stream returned by this method will receive the
|
||||
* merged standard output and the standard error of the subprocess.
|
||||
* merged standard output and the standard error of the process.
|
||||
*
|
||||
* <p>Implementation note: It is a good idea for the returned
|
||||
* input stream to be buffered.
|
||||
*
|
||||
* @return the input stream connected to the normal output of the
|
||||
* subprocess
|
||||
* process
|
||||
*/
|
||||
public abstract InputStream getInputStream();
|
||||
|
||||
/**
|
||||
* Returns the input stream connected to the error output of the
|
||||
* subprocess. The stream obtains data piped from the error output
|
||||
* process. The stream obtains data piped from the error output
|
||||
* of the process represented by this {@code Process} object.
|
||||
*
|
||||
* <p>If the standard error of the subprocess has been redirected using
|
||||
* <p>If the standard error of the process has been redirected using
|
||||
* {@link ProcessBuilder#redirectError(Redirect)
|
||||
* ProcessBuilder.redirectError} or
|
||||
* {@link ProcessBuilder#redirectErrorStream(boolean)
|
||||
@ -137,19 +157,19 @@ public abstract class Process {
|
||||
* input stream to be buffered.
|
||||
*
|
||||
* @return the input stream connected to the error output of
|
||||
* the subprocess
|
||||
* the process
|
||||
*/
|
||||
public abstract InputStream getErrorStream();
|
||||
|
||||
/**
|
||||
* Causes the current thread to wait, if necessary, until the
|
||||
* process represented by this {@code Process} object has
|
||||
* terminated. This method returns immediately if the subprocess
|
||||
* has already terminated. If the subprocess has not yet
|
||||
* terminated. This method returns immediately if the process
|
||||
* has already terminated. If the process has not yet
|
||||
* terminated, the calling thread will be blocked until the
|
||||
* subprocess exits.
|
||||
* process exits.
|
||||
*
|
||||
* @return the exit value of the subprocess represented by this
|
||||
* @return the exit value of the process represented by this
|
||||
* {@code Process} object. By convention, the value
|
||||
* {@code 0} indicates normal termination.
|
||||
* @throws InterruptedException if the current thread is
|
||||
@ -161,10 +181,10 @@ public abstract class Process {
|
||||
|
||||
/**
|
||||
* Causes the current thread to wait, if necessary, until the
|
||||
* subprocess represented by this {@code Process} object has
|
||||
* process represented by this {@code Process} object has
|
||||
* terminated, or the specified waiting time elapses.
|
||||
*
|
||||
* <p>If the subprocess has already terminated then this method returns
|
||||
* <p>If the process has already terminated then this method returns
|
||||
* immediately with the value {@code true}. If the process has not
|
||||
* terminated and the timeout value is less than, or equal to, zero, then
|
||||
* this method returns immediately with the value {@code false}.
|
||||
@ -176,8 +196,8 @@ public abstract class Process {
|
||||
*
|
||||
* @param timeout the maximum time to wait
|
||||
* @param unit the time unit of the {@code timeout} argument
|
||||
* @return {@code true} if the subprocess has exited and {@code false} if
|
||||
* the waiting time elapsed before the subprocess has exited.
|
||||
* @return {@code true} if the process has exited and {@code false} if
|
||||
* the waiting time elapsed before the process has exited.
|
||||
* @throws InterruptedException if the current thread is interrupted
|
||||
* while waiting.
|
||||
* @throws NullPointerException if unit is null
|
||||
@ -204,41 +224,60 @@ public abstract class Process {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the exit value for the subprocess.
|
||||
* Returns the exit value for the process.
|
||||
*
|
||||
* @return the exit value of the subprocess represented by this
|
||||
* @return the exit value of the process represented by this
|
||||
* {@code Process} object. By convention, the value
|
||||
* {@code 0} indicates normal termination.
|
||||
* @throws IllegalThreadStateException if the subprocess represented
|
||||
* @throws IllegalThreadStateException if the process represented
|
||||
* by this {@code Process} object has not yet terminated
|
||||
*/
|
||||
public abstract int exitValue();
|
||||
|
||||
/**
|
||||
* Kills the subprocess. Whether the subprocess represented by this
|
||||
* {@code Process} object is forcibly terminated or not is
|
||||
* Kills the process.
|
||||
* Whether the process represented by this {@code Process} object is
|
||||
* {@link #supportsNormalTermination normally terminated} or not is
|
||||
* implementation dependent.
|
||||
* Forcible process destruction is defined as the immediate termination of a
|
||||
* process, whereas normal termination allows the process to shut down cleanly.
|
||||
* If the process is not alive, no action is taken.
|
||||
* <p>
|
||||
* The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed}
|
||||
* when the process has terminated.
|
||||
*/
|
||||
public abstract void destroy();
|
||||
|
||||
/**
|
||||
* Kills the subprocess. The subprocess represented by this
|
||||
* Kills the process forcibly. The process represented by this
|
||||
* {@code Process} object is forcibly terminated.
|
||||
* Forcible process destruction is defined as the immediate termination of a
|
||||
* process, whereas normal termination allows the process to shut down cleanly.
|
||||
* If the process is not alive, no action is taken.
|
||||
* <p>
|
||||
* The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed}
|
||||
* when the process has terminated.
|
||||
* <p>
|
||||
* Invoking this method on {@code Process} objects returned by
|
||||
* {@link ProcessBuilder#start} and {@link Runtime#exec} forcibly terminate
|
||||
* the process.
|
||||
*
|
||||
* <p>The default implementation of this method invokes {@link #destroy}
|
||||
* and so may not forcibly terminate the process. Concrete implementations
|
||||
* of this class are strongly encouraged to override this method with a
|
||||
* compliant implementation. Invoking this method on {@code Process}
|
||||
* objects returned by {@link ProcessBuilder#start} and
|
||||
* {@link Runtime#exec} will forcibly terminate the process.
|
||||
*
|
||||
* <p>Note: The subprocess may not terminate immediately.
|
||||
* @implSpec
|
||||
* The default implementation of this method invokes {@link #destroy}
|
||||
* and so may not forcibly terminate the process.
|
||||
* @implNote
|
||||
* Concrete implementations of this class are strongly encouraged to override
|
||||
* this method with a compliant implementation.
|
||||
* @apiNote
|
||||
* The process may not terminate immediately.
|
||||
* i.e. {@code isAlive()} may return true for a brief period
|
||||
* after {@code destroyForcibly()} is called. This method
|
||||
* may be chained to {@code waitFor()} if needed.
|
||||
*
|
||||
* @return the {@code Process} object representing the
|
||||
* subprocess to be forcibly destroyed.
|
||||
* process forcibly destroyed
|
||||
* @since 1.8
|
||||
*/
|
||||
public Process destroyForcibly() {
|
||||
@ -247,10 +286,36 @@ public abstract class Process {
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests whether the subprocess represented by this {@code Process} is
|
||||
* Returns {@code true} if the implementation of {@link #destroy} is to
|
||||
 * normally terminate the process;
|
||||
* Returns {@code false} if the implementation of {@code destroy}
|
||||
* forcibly and immediately terminates the process.
|
||||
* <p>
|
||||
* Invoking this method on {@code Process} objects returned by
|
||||
* {@link ProcessBuilder#start} and {@link Runtime#exec} return
|
||||
* {@code true} or {@code false} depending on the platform implementation.
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation throws an instance of
|
||||
* {@link java.lang.UnsupportedOperationException} and performs no other action.
|
||||
*
|
||||
* @return {@code true} if the implementation of {@link #destroy} is to
|
||||
* normally terminate the process;
|
||||
* otherwise, {@link #destroy} forcibly terminates the process
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* @since 1.9
|
||||
*/
|
||||
public boolean supportsNormalTermination() {
|
||||
throw new UnsupportedOperationException(this.getClass()
|
||||
+ ".supportsNormalTermination() not supported" );
|
||||
}
|
||||
|
||||
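A sketch of picking a termination style based on supportsNormalTermination; the sleep command is arbitrary and platform dependent.

import java.io.IOException;

public class TerminateDemo {
    public static void main(String[] args) throws IOException, InterruptedException {
        Process p = new ProcessBuilder("sleep", "60").start();
        if (p.supportsNormalTermination()) {
            p.destroy();          // ask the process to shut down cleanly
        } else {
            p.destroyForcibly();  // implementation can only kill immediately
        }
        p.waitFor();              // destruction is asynchronous; wait for exit
    }
}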
/**
|
||||
* Tests whether the process represented by this {@code Process} is
|
||||
* alive.
|
||||
*
|
||||
* @return {@code true} if the subprocess represented by this
|
||||
* @return {@code true} if the process represented by this
|
||||
* {@code Process} object has not yet terminated.
|
||||
* @since 1.8
|
||||
*/
|
||||
@ -264,16 +329,222 @@ public abstract class Process {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the native process id of the subprocess.
|
||||
* The native process id is an identification number that the operating
|
||||
* Returns the native process ID of the process.
|
||||
* The native process ID is an identification number that the operating
|
||||
* system assigns to the process.
|
||||
*
|
||||
* @return the native process id of the subprocess
|
||||
* @implSpec
|
||||
* The implementation of this method returns the process id as:
|
||||
* {@link #toHandle toHandle().getPid()}.
|
||||
*
|
||||
* @return the native process id of the process
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* does not support this operation
|
||||
* @since 1.9
|
||||
*/
|
||||
public long getPid() {
|
||||
throw new UnsupportedOperationException();
|
||||
return toHandle().getPid();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@code CompletableFuture<Process>} for the termination of the Process.
|
||||
* The {@link java.util.concurrent.CompletableFuture} provides the ability
|
||||
* to trigger dependent functions or actions that may be run synchronously
|
||||
* or asynchronously upon process termination.
|
||||
* When the process terminates the CompletableFuture is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed} regardless
|
||||
* of the exit status of the process.
|
||||
* <p>
|
||||
* Calling {@code onExit().get()} waits for the process to terminate and returns
|
||||
* the Process. The future can be used to check if the process is
|
||||
* {@link java.util.concurrent.CompletableFuture#isDone done} or to
|
||||
* {@link java.util.concurrent.CompletableFuture#get() wait} for it to terminate.
|
||||
* {@link java.util.concurrent.CompletableFuture#cancel(boolean) Cancelling}
|
||||
* the CompletableFuture does not affect the Process.
|
||||
* <p>
|
||||
* If the process is {@link #isAlive not alive} the {@link CompletableFuture}
|
||||
* returned has been {@link java.util.concurrent.CompletableFuture#complete completed}.
|
||||
* <p>
|
||||
* Processes returned from {@link ProcessBuilder#start} override the
|
||||
* default implementation to provide an efficient mechanism to wait
|
||||
* for process exit.
|
||||
* <p>
|
||||
* @apiNote
|
||||
* Using {@link #onExit() onExit} is an alternative to
|
||||
* {@link #waitFor() waitFor} that enables both additional concurrency
|
||||
* and convenient access to the result of the Process.
|
||||
* Lambda expressions can be used to evaluate the result of the Process
|
||||
* execution.
|
||||
* If there is other processing to be done before the value is used
|
||||
* then {@linkplain #onExit onExit} is a convenient mechanism to
|
||||
* free the current thread and block only if and when the value is needed.
|
||||
* <br>
|
||||
* For example, launching a process to compare two files and get a boolean if they are identical:
|
||||
* <pre> {@code Process p = new ProcessBuilder("cmp", "f1", "f2").start();
|
||||
* Future<Boolean> identical = p.onExit().thenApply(p1 -> p1.exitValue() == 0);
|
||||
* ...
|
||||
* if (identical.get()) { ... }
|
||||
* }</pre>
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation executes {@link #waitFor()} in a separate thread
|
||||
* repeatedly until it returns successfully. If the execution of
|
||||
* {@code waitFor} is interrupted, the thread's interrupt status is preserved.
|
||||
* <p>
|
||||
* When {@link #waitFor()} returns successfully the CompletableFuture is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed} regardless
|
||||
* of the exit status of the process.
|
||||
*
|
||||
* This implementation may consume a lot of memory for thread stacks if a
|
||||
* large number of processes are waited for concurrently.
|
||||
* <p>
|
||||
* External implementations should override this method and provide
|
||||
* a more efficient implementation. For example, to delegate to the underlying
|
||||
* process, it can do the following:
|
||||
* <pre>{@code
|
||||
* public CompletableFuture<Process> onExit() {
|
||||
* return delegate.onExit().thenApply(p -> this);
|
||||
* }
|
||||
* }</pre>
|
||||
*
|
||||
* @return a new {@code CompletableFuture<Process>} for the Process
|
||||
*
|
||||
* @since 1.9
|
||||
*/
|
||||
public CompletableFuture<Process> onExit() {
|
||||
return CompletableFuture.supplyAsync(this::waitForInternal);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for the process to exit by calling {@code waitFor}.
|
||||
* If the thread is interrupted, remember the interrupted state to
|
||||
* be restored before returning. Use ForkJoinPool.ManagedBlocker
|
||||
 * so that, when a ForkJoinPool is in use, the pool can compensate
 * for the worker that blocks in waitFor().
|
||||
*
|
||||
* @return the Process
|
||||
*/
|
||||
private Process waitForInternal() {
|
||||
boolean interrupted = false;
|
||||
while (true) {
|
||||
try {
|
||||
ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() {
|
||||
@Override
|
||||
public boolean block() throws InterruptedException {
|
||||
waitFor();
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isReleasable() {
|
||||
return !isAlive();
|
||||
}
|
||||
});
|
||||
break;
|
||||
} catch (InterruptedException x) {
|
||||
interrupted = true;
|
||||
}
|
||||
}
|
||||
if (interrupted) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a ProcessHandle for the Process.
|
||||
*
|
||||
* {@code Process} objects returned by {@link ProcessBuilder#start} and
|
||||
* {@link Runtime#exec} implement {@code toHandle} as the equivalent of
|
||||
* {@link ProcessHandle#of(long) ProcessHandle.of(pid)} including the
|
||||
* check for a SecurityManager and {@code RuntimePermission("manageProcess")}.
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation throws an instance of
|
||||
* {@link java.lang.UnsupportedOperationException} and performs no other action.
|
||||
* Subclasses should override this method to provide a ProcessHandle for the
|
||||
* process. The methods {@link #getPid}, {@link #info}, {@link #children},
|
||||
* and {@link #allChildren}, unless overridden, operate on the ProcessHandle.
|
||||
*
|
||||
* @return Returns a ProcessHandle for the Process
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @since 1.9
|
||||
*/
|
||||
public ProcessHandle toHandle() {
|
||||
throw new UnsupportedOperationException(this.getClass()
|
||||
+ ".toHandle() not supported");
|
||||
}
|
||||
|
||||
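A sketch of the delegation the javadoc recommends: a hypothetical wrapper Process that forwards the handle-related methods to an underlying Process. The class name and structure are illustrative only.

import java.io.InputStream;
import java.io.OutputStream;
import java.util.concurrent.CompletableFuture;

// Hypothetical wrapper that delegates handle-related methods to another Process.
class DelegatingProcess extends Process {
    private final Process delegate;

    DelegatingProcess(Process delegate) {
        this.delegate = delegate;
    }

    @Override public OutputStream getOutputStream() { return delegate.getOutputStream(); }
    @Override public InputStream getInputStream()   { return delegate.getInputStream(); }
    @Override public InputStream getErrorStream()   { return delegate.getErrorStream(); }
    @Override public int waitFor() throws InterruptedException { return delegate.waitFor(); }
    @Override public int exitValue() { return delegate.exitValue(); }
    @Override public void destroy()  { delegate.destroy(); }

    @Override
    public ProcessHandle toHandle() {
        return delegate.toHandle();     // getPid, info, children, allChildren follow from this
    }

    @Override
    public CompletableFuture<Process> onExit() {
        return delegate.onExit().thenApply(p -> this);  // pattern from the javadoc above
    }
}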
/**
|
||||
* Returns a snapshot of information about the process.
|
||||
*
|
||||
* <p> An {@link ProcessHandle.Info} instance has various accessor methods
|
||||
* that return information about the process, if the process is alive and
|
||||
* the information is available, otherwise {@code null} is returned.
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation returns information about the process as:
|
||||
* {@link #toHandle toHandle().info()}.
|
||||
*
|
||||
* @return a snapshot of information about the process, always non-null
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* @since 1.9
|
||||
*/
|
||||
public ProcessHandle.Info info() {
|
||||
return toHandle().info();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a snapshot of the direct children of the process.
|
||||
* A process that is {@link #isAlive not alive} has zero children.
|
||||
* <p>
|
||||
* <em>Note that processes are created and terminate asynchronously.
|
||||
* There is no guarantee that a process is {@link #isAlive alive}.
|
||||
* </em>
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation returns the direct children as:
|
||||
* {@link #toHandle toHandle().children()}.
|
||||
*
|
||||
* @return a Stream of ProcessHandles for processes that are direct children
|
||||
* of the process
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @since 1.9
|
||||
*/
|
||||
public Stream<ProcessHandle> children() {
|
||||
return toHandle().children();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a snapshot of the direct and indirect children of the process.
|
||||
* A process that is {@link #isAlive not alive} has zero children.
|
||||
* <p>
|
||||
* <em>Note that processes are created and terminate asynchronously.
|
||||
* There is no guarantee that a process is {@link #isAlive alive}.
|
||||
* </em>
|
||||
*
|
||||
* @implSpec
|
||||
* This implementation returns all children as:
|
||||
* {@link #toHandle toHandle().allChildren()}.
|
||||
*
|
||||
* @return a Stream of ProcessHandles for processes that are direct and
|
||||
* indirect children of the process
|
||||
* @throws UnsupportedOperationException if the Process implementation
|
||||
* does not support this operation
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @since 1.9
|
||||
*/
|
||||
public Stream<ProcessHandle> allChildren() {
|
||||
return toHandle().allChildren();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
361
jdk/src/java.base/share/classes/java/lang/ProcessHandle.java
Normal file
@ -0,0 +1,361 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.lang;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* ProcessHandle identifies and provides control of native processes. Each
|
||||
* individual process can be monitored for liveness, list its children,
|
||||
* get information about the process or destroy it.
|
||||
* By comparison, {@link java.lang.Process Process} instances were started
|
||||
* by the current process and additionally provide access to the process
|
||||
* input, output, and error streams.
|
||||
* <p>
|
||||
* The native process ID is an identification number that the
|
||||
* operating system assigns to the process.
|
||||
* The range for process id values is dependent on the operating system.
|
||||
* For example, an embedded system might use a 16-bit value.
|
||||
* Status information about a process is retrieved from the native system
|
||||
* and may change asynchronously; processes may be created or terminate
|
||||
* spontaneously.
|
||||
* The time between when a process terminates and the process id
|
||||
* is reused for a new process is unpredictable.
|
||||
* Race conditions can exist between checking the status of a process and
|
||||
* acting upon it. When using ProcessHandles avoid assumptions
|
||||
* about the liveness or identity of the underlying process.
|
||||
* <p>
|
||||
* Each ProcessHandle identifies and allows control of a process in the native
|
||||
* system. ProcessHandles are returned from the factory methods {@link #current()},
|
||||
* {@link #of(long)},
|
||||
* {@link #children}, {@link #allChildren}, {@link #parent()} and
|
||||
* {@link #allProcesses()}.
|
||||
* <p>
|
||||
* The {@link Process} instances created by {@link ProcessBuilder} can be queried
|
||||
* for a ProcessHandle that provides information about the Process.
|
||||
* ProcessHandle references should not be freely distributed.
|
||||
*
|
||||
* <p>
|
||||
* A {@link java.util.concurrent.CompletableFuture} available from {@link #onExit}
|
||||
* can be used to wait for process termination, and possibly trigger dependent
|
||||
* actions.
|
||||
* <p>
|
||||
* The factory methods limit access to ProcessHandles using the
|
||||
* SecurityManager checking the {@link RuntimePermission RuntimePermission("manageProcess")}.
|
||||
* The ability to control processes is also restricted by the native system,
|
||||
* ProcessHandle provides no more access to, or control over, the native process
|
||||
* than would be allowed by a native application.
|
||||
* <p>
|
||||
* @implSpec
|
||||
* In the case where ProcessHandles cannot be supported then the factory
|
||||
* methods must consistently throw {@link java.lang.UnsupportedOperationException}.
|
||||
* The methods of this class throw {@link java.lang.UnsupportedOperationException}
|
||||
* if the operating system does not allow access to query or kill a process.
|
||||
*
|
||||
* @see Process
|
||||
* @since 1.9
|
||||
*/
|
||||
public interface ProcessHandle extends Comparable<ProcessHandle> {
|
||||
|
||||
/**
|
||||
* Returns the native process ID of the process. The native process ID is an
|
||||
* identification number that the operating system assigns to the process.
|
||||
*
|
||||
* @return the native process ID of the process
|
||||
* @throws UnsupportedOperationException if the implementation
|
||||
* does not support this operation
|
||||
*/
|
||||
long getPid();
|
||||
|
||||
/**
|
||||
* Returns an {@code Optional<ProcessHandle>} for an existing native process.
|
||||
*
|
||||
* @param pid a native process ID
|
||||
* @return an {@code Optional<ProcessHandle>} of the PID for the process;
|
||||
* the {@code Optional} is empty if the process does not exist
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @throws UnsupportedOperationException if the implementation
|
||||
* does not support this operation
|
||||
*/
|
||||
public static Optional<ProcessHandle> of(long pid) {
|
||||
return ProcessHandleImpl.get(pid);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a ProcessHandle for the current process. The ProcessHandle cannot be
|
||||
* used to destroy the current process, use {@link System#exit System.exit} instead.
|
||||
*
|
||||
* @return a ProcessHandle for the current process
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @throws UnsupportedOperationException if the implementation
|
||||
* does not support this operation
|
||||
*/
|
||||
public static ProcessHandle current() {
|
||||
return ProcessHandleImpl.current();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an {@code Optional<ProcessHandle>} for the parent process.
|
||||
* Note that Processes in a zombie state usually don't have a parent.
|
||||
*
|
||||
* @return an {@code Optional<ProcessHandle>} of the parent process;
|
||||
* the {@code Optional} is empty if the child process does not have a parent
|
||||
* or if the parent is not available, possibly due to operating system limitations
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
*/
|
||||
Optional<ProcessHandle> parent();
|
||||
|
||||
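A sketch tying the factory methods together, looking up the current process, its parent, and an arbitrary PID; the PID value 1 is only a placeholder.

import java.util.Optional;

public class HandleLookupDemo {
    public static void main(String[] args) {
        ProcessHandle self = ProcessHandle.current();
        System.out.println("current pid: " + self.getPid());

        self.parent().ifPresent(p ->
                System.out.println("parent pid: " + p.getPid()));

        // 1 is a placeholder PID; the Optional is empty if no such process exists
        Optional<ProcessHandle> other = ProcessHandle.of(1);
        System.out.println("pid 1 alive: " + other.map(ProcessHandle::isAlive).orElse(false));
    }
}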
/**
|
||||
* Returns a snapshot of the current direct children of the process.
|
||||
* A process that is {@link #isAlive not alive} has zero children.
|
||||
* <p>
|
||||
* <em>Note that processes are created and terminate asynchronously.
|
||||
* There is no guarantee that a process is {@link #isAlive alive}.
|
||||
* </em>
|
||||
*
|
||||
* @return a Stream of ProcessHandles for processes that are direct children
|
||||
* of the process
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
*/
|
||||
Stream<ProcessHandle> children();
|
||||
|
||||
/**
|
||||
* Returns a snapshot of the current direct and indirect children of the process.
|
||||
* A process that is {@link #isAlive not alive} has zero children.
|
||||
* <p>
|
||||
* <em>Note that processes are created and terminate asynchronously.
|
||||
* There is no guarantee that a process is {@link #isAlive alive}.
|
||||
* </em>
|
||||
*
|
||||
* @return a Stream of ProcessHandles for processes that are direct and
|
||||
* indirect children of the process
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
*/
|
||||
Stream<ProcessHandle> allChildren();
|
||||
|
||||
/**
|
||||
* Returns a snapshot of all processes visible to the current process.
|
||||
* <p>
|
||||
* <em>Note that processes are created and terminate asynchronously. There
|
||||
* is no guarantee that a process in the stream is alive or that no other
|
||||
* processes may have been created since the inception of the snapshot.
|
||||
* </em>
|
||||
*
|
||||
* @return a Stream of ProcessHandles for all processes
|
||||
* @throws SecurityException if a security manager has been installed and
|
||||
* it denies RuntimePermission("manageProcess")
|
||||
* @throws UnsupportedOperationException if the implementation
|
||||
* does not support this operation
|
||||
*/
|
||||
static Stream<ProcessHandle> allProcesses() {
|
||||
return ProcessHandleImpl.children(0);
|
||||
}
|
||||
|
||||
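A sketch that enumerates processes with the streams defined here; the output is entirely system dependent.

public class ProcessListDemo {
    public static void main(String[] args) {
        // Direct children of this JVM (usually none for a bare demo).
        long direct = ProcessHandle.current().children().count();
        System.out.println("direct children: " + direct);

        // The first few processes visible to this process.
        ProcessHandle.allProcesses()
                .limit(10)
                .forEach(ph -> System.out.println(ph.getPid()));
    }
}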
/**
|
||||
* Returns a snapshot of information about the process.
|
||||
*
|
||||
* <p> An {@code Info} instance has various accessor methods that return
|
||||
* information about the process, if the process is alive and the
|
||||
* information is available.
|
||||
*
|
||||
* @return a snapshot of information about the process, always non-null
|
||||
*/
|
||||
Info info();
|
||||
|
||||
/**
|
||||
* Information snapshot about the process.
|
||||
* The attributes of a process vary by operating system and are not available
|
||||
* in all implementations. Information about processes is limited
|
||||
* by the operating system privileges of the process making the request.
|
||||
* The return types are {@code Optional<T>} allowing explicit tests
|
||||
* and actions if the value is available.
|
||||
* @since 1.9
|
||||
*/
|
||||
public interface Info {
|
||||
/**
|
||||
* Returns the executable pathname of the process.
|
||||
*
|
||||
* @return an {@code Optional<String>} of the executable pathname
|
||||
* of the process
|
||||
*/
|
||||
public Optional<String> command();
|
||||
|
||||
/**
|
||||
* Returns an array of Strings of the arguments of the process.
|
||||
*
|
||||
* @return an {@code Optional<String[]>} of the arguments of the process
|
||||
*/
|
||||
public Optional<String[]> arguments();
|
||||
|
||||
/**
|
||||
* Returns the start time of the process.
|
||||
*
|
||||
* @return an {@code Optional<Instant>} of the start time of the process
|
||||
*/
|
||||
public Optional<Instant> startInstant();
|
||||
|
||||
/**
|
||||
 * Returns the total CPU time accumulated by the process.
|
||||
*
|
||||
* @return an {@code Optional<Duration>} for the accumulated total cputime
|
||||
*/
|
||||
public Optional<Duration> totalCpuDuration();
|
||||
|
||||
/**
|
||||
* Return the user of the process.
|
||||
*
|
||||
* @return an {@code Optional<String>} for the user of the process
|
||||
*/
|
||||
public Optional<String> user();
|
||||
}
|
||||
|
||||
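A sketch reading the Info snapshot; each accessor returns an Optional because the data may be unavailable or the process may already be gone.

import java.time.Duration;
import java.time.Instant;

public class InfoDemo {
    public static void main(String[] args) {
        ProcessHandle.Info info = ProcessHandle.current().info();
        System.out.println("command:  " + info.command().orElse("<unknown>"));
        System.out.println("user:     " + info.user().orElse("<unknown>"));
        System.out.println("started:  " + info.startInstant().map(Instant::toString).orElse("<unknown>"));
        System.out.println("cpu time: " + info.totalCpuDuration().map(Duration::toString).orElse("<unknown>"));
    }
}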
/**
|
||||
* Returns a {@code CompletableFuture<ProcessHandle>} for the termination
|
||||
* of the process.
|
||||
* The {@link java.util.concurrent.CompletableFuture} provides the ability
|
||||
* to trigger dependent functions or actions that may be run synchronously
|
||||
* or asynchronously upon process termination.
|
||||
* When the process terminates the CompletableFuture is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed} regardless
|
||||
* of the exit status of the process.
|
||||
* The {@code onExit} method can be called multiple times to invoke
|
||||
* independent actions when the process exits.
|
||||
* <p>
|
||||
* Calling {@code onExit().get()} waits for the process to terminate and returns
|
||||
* the ProcessHandle. The future can be used to check if the process is
|
||||
* {@link java.util.concurrent.CompletableFuture#isDone done} or to
|
||||
* {@link java.util.concurrent.Future#get() wait} for it to terminate.
|
||||
* {@link java.util.concurrent.Future#cancel(boolean) Cancelling}
|
||||
 * the CompletableFuture does not affect the Process.
|
||||
* <p>
|
||||
* If the process is {@link #isAlive not alive} the {@link CompletableFuture}
|
||||
* returned has been {@link java.util.concurrent.CompletableFuture#complete completed}.
|
||||
*
|
||||
* @return a new {@code CompletableFuture<ProcessHandle>} for the ProcessHandle
|
||||
*
|
||||
* @throws IllegalStateException if the process is the current process
|
||||
*/
|
||||
CompletableFuture<ProcessHandle> onExit();
|
||||
|
||||
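A sketch of attaching an action to process exit without blocking the current thread; the sleep command is arbitrary, and the final join is there only so the demo waits before exiting.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

public class OnExitDemo {
    public static void main(String[] args) throws IOException {
        Process p = new ProcessBuilder("sleep", "2").start();
        ProcessHandle handle = p.toHandle();

        CompletableFuture<ProcessHandle> done = handle.onExit();
        done.thenAccept(ph -> System.out.println("process " + ph.getPid() + " exited"));

        done.join();   // block here only for the sake of the demo
    }
}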
/**
|
||||
* Returns {@code true} if the implementation of {@link #destroy}
|
||||
* normally terminates the process.
|
||||
* Returns {@code false} if the implementation of {@code destroy}
|
||||
* forcibly and immediately terminates the process.
|
||||
*
|
||||
* @return {@code true} if the implementation of {@link #destroy}
|
||||
* normally terminates the process;
|
||||
* otherwise, {@link #destroy} forcibly terminates the process
|
||||
*/
|
||||
boolean supportsNormalTermination();
|
||||
|
||||
/**
|
||||
* Requests the process to be killed.
|
||||
* Whether the process represented by this {@code ProcessHandle} object is
|
||||
* {@link #supportsNormalTermination normally terminated} or not is
|
||||
* implementation dependent.
|
||||
* Forcible process destruction is defined as the immediate termination of the
|
||||
* process, whereas normal termination allows the process to shut down cleanly.
|
||||
* If the process is not alive, no action is taken.
|
||||
* The operating system access controls may prevent the process
|
||||
* from being killed.
|
||||
* <p>
|
||||
* The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed}
|
||||
* when the process has terminated.
|
||||
* <p>
|
||||
* Note: The process may not terminate immediately.
|
||||
* For example, {@code isAlive()} may return true for a brief period
|
||||
* after {@code destroy()} is called.
|
||||
*
|
||||
* @return {@code true} if termination was successfully requested,
|
||||
* otherwise {@code false}
|
||||
* @throws IllegalStateException if the process is the current process
|
||||
*/
|
||||
boolean destroy();
|
||||
|
||||
/**
|
||||
* Requests the process to be killed forcibly.
|
||||
* The process represented by this {@code ProcessHandle} object is
|
||||
* forcibly terminated.
|
||||
* Forcible process destruction is defined as the immediate termination of the
|
||||
* process, whereas normal termination allows the process to shut down cleanly.
|
||||
* If the process is not alive, no action is taken.
|
||||
* The operating system access controls may prevent the process
|
||||
* from being killed.
|
||||
* <p>
|
||||
* The {@link java.util.concurrent.CompletableFuture} from {@link #onExit} is
|
||||
* {@link java.util.concurrent.CompletableFuture#complete completed}
|
||||
* when the process has terminated.
|
||||
* <p>
|
||||
* Note: The process may not terminate immediately.
|
||||
* For example, {@code isAlive()} may return true for a brief period
|
||||
* after {@code destroyForcibly()} is called.
|
||||
*
|
||||
* @return {@code true} if termination was successfully requested,
|
||||
* otherwise {@code false}
|
||||
* @throws IllegalStateException if the process is the current process
|
||||
*/
|
||||
boolean destroyForcibly();
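
A companion sketch for destroy()/destroyForcibly() (editorial, not part of this changeset): termination is only requested, so the reliable way to observe it is the onExit() future rather than polling isAlive(). This again assumes a POSIX "sleep" command and Process.toHandle().

    import java.util.concurrent.TimeUnit;

    public class DestroySketch {
        public static void main(String[] args) throws Exception {
            Process child = new ProcessBuilder("sleep", "60").start();
            ProcessHandle ph = child.toHandle();

            // Prefer normal termination where the platform supports it.
            boolean requested = ph.supportsNormalTermination()
                    ? ph.destroy()
                    : ph.destroyForcibly();
            System.out.println("termination requested: " + requested);

            // The process may not die immediately; wait on the onExit() future.
            ph.onExit().get(30, TimeUnit.SECONDS);
            System.out.println("alive after exit: " + ph.isAlive());
        }
    }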

    /**
     * Tests whether the process represented by this {@code ProcessHandle} is alive.
     * Process termination is implementation and operating system specific.
     * The process is considered alive as long as the PID is valid.
     *
     * @return {@code true} if the process represented by this
     *         {@code ProcessHandle} object has not yet terminated
     */
    boolean isAlive();

    /**
     * Compares this ProcessHandle with the specified ProcessHandle for order.
     * The order is not specified, but is consistent with {@link Object#equals},
     * which returns {@code true} if and only if two instances of ProcessHandle
     * are of the same implementation and represent the same system process.
     * Comparison is only supported among objects of the same implementation.
     * If an attempt is made to compare two different implementations
     * of {@link ProcessHandle}, a {@link ClassCastException} is thrown.
     *
     * @param other the ProcessHandle to be compared
     * @return a negative integer, zero, or a positive integer as this object
     *         is less than, equal to, or greater than the specified object.
     * @throws NullPointerException if the specified object is null
     * @throws ClassCastException if the specified object is not of the same class
     *         as this object
     */
    @Override
    int compareTo(ProcessHandle other);
}
528 jdk/src/java.base/share/classes/java/lang/ProcessHandleImpl.java Normal file
@ -0,0 +1,528 @@
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package java.lang;

import java.security.PrivilegedAction;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;

import sun.misc.InnocuousThread;

import static java.security.AccessController.doPrivileged;

/**
 * ProcessHandleImpl is the implementation of ProcessHandle.
 *
 * @see Process
 * @since 1.9
 */
final class ProcessHandleImpl implements ProcessHandle {

    /**
     * The thread pool of "process reaper" daemon threads.
     */
    private static final Executor processReaperExecutor =
            doPrivileged((PrivilegedAction<Executor>) () -> {

                ThreadGroup tg = Thread.currentThread().getThreadGroup();
                while (tg.getParent() != null) tg = tg.getParent();
                ThreadGroup systemThreadGroup = tg;

                ThreadFactory threadFactory = grimReaper -> {
                    // Our thread stack requirement is quite modest.
                    Thread t = new Thread(systemThreadGroup, grimReaper,
                            "process reaper", 32768);
                    t.setDaemon(true);
                    // A small attempt (probably futile) to avoid priority inversion
                    t.setPriority(Thread.MAX_PRIORITY);
                    return t;
                };

                return Executors.newCachedThreadPool(threadFactory);
            });

    private static class ExitCompletion extends CompletableFuture<Integer> {
        final boolean isReaping;

        ExitCompletion(boolean isReaping) {
            this.isReaping = isReaping;
        }
    }

    private static final ConcurrentMap<Long, ExitCompletion>
            completions = new ConcurrentHashMap<>();

    /**
     * Returns a CompletableFuture that completes with the process exit status
     * when the process completes.
     *
     * @param shouldReap true if the exit value should be reaped
     */
    static CompletableFuture<Integer> completion(long pid, boolean shouldReap) {
        // check canonicalizing cache 1st
        ExitCompletion completion = completions.get(pid);
        // re-try until we get a completion that shouldReap => isReaping
        while (completion == null || (shouldReap && !completion.isReaping)) {
            ExitCompletion newCompletion = new ExitCompletion(shouldReap);
            if (completion == null) {
                completion = completions.putIfAbsent(pid, newCompletion);
            } else {
                completion = completions.replace(pid, completion, newCompletion)
                        ? null : completions.get(pid);
            }
            if (completion == null) {
                // newCompletion has just been installed successfully
                completion = newCompletion;
                // spawn a thread to wait for and deliver the exit value
                processReaperExecutor.execute(() -> {
                    int exitValue = waitForProcessExit0(pid, shouldReap);
                    newCompletion.complete(exitValue);
                    // remove from cache afterwards
                    completions.remove(pid, newCompletion);
                });
            }
        }
        return completion;
    }

    @Override
    public CompletableFuture<ProcessHandle> onExit() {
        if (this.equals(current)) {
            throw new IllegalStateException("onExit for current process not allowed");
        }

        return ProcessHandleImpl.completion(getPid(), false)
                .handleAsync((exitStatus, unusedThrowable) -> this);
    }

    /**
     * Waits for the process to exit and returns the exit value.
     * Conditionally reaps the value if requested.
     * @param pid the processId
     * @param reapvalue if true, the value is retrieved and reaped,
     *        else the value is returned and the process is left waitable
     *
     * @return the value or -1 if an error occurs
     */
    private static native int waitForProcessExit0(long pid, boolean reapvalue);

    /**
     * Cache the ProcessHandle of this process.
     */
    private static final ProcessHandleImpl current =
            new ProcessHandleImpl(getCurrentPid0());

    /**
     * The pid of this ProcessHandle.
     */
    private final long pid;

    /**
     * Private constructor. Instances are created by the {@code get(long)} factory.
     * @param pid the pid for this instance
     */
    private ProcessHandleImpl(long pid) {
        this.pid = pid;
    }

    /**
     * Returns a ProcessHandle for an existing native process.
     *
     * @param pid the native process identifier
     * @return an {@code Optional<ProcessHandle>} for the pid if the process is alive;
     *         an empty {@code Optional} if the process ID does not exist in the native system
     * @throws SecurityException if RuntimePermission("manageProcess") is not granted
     */
    static Optional<ProcessHandle> get(long pid) {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new RuntimePermission("manageProcess"));
        }
        return Optional.ofNullable(isAlive0(pid) ? new ProcessHandleImpl(pid) : null);
    }

    /**
     * Returns a ProcessHandle for a pid that is known to exist.
     * Called from ProcessImpl, it does not perform a security check or check if the process is alive.
     * @param pid the pid of the process known to exist
     * @return a ProcessHandle corresponding to an existing Process instance
     */
    static ProcessHandle getUnchecked(long pid) {
        return new ProcessHandleImpl(pid);
    }

    /**
     * Returns the native process ID.
     * A {@code long} is used to be able to fit the system-specific binary values
     * for the process.
     *
     * @return the native process ID
     */
    @Override
    public long getPid() {
        return pid;
    }

    /**
     * Returns the ProcessHandle for the current native process.
     *
     * @return The ProcessHandle for the OS process.
     * @throws SecurityException if RuntimePermission("manageProcess") is not granted
     */
    public static ProcessHandleImpl current() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new RuntimePermission("manageProcess"));
        }
        return current;
    }

    /**
     * Returns the pid of the current process.
     *
     * @return the pid of the current process
     */
    private static native long getCurrentPid0();

    /**
     * Returns a ProcessHandle for the parent process.
     *
     * @return an {@code Optional<ProcessHandle>} of the parent process;
     *         empty if the child process does not have a parent
     * @throws SecurityException if permission is not granted by the
     *         security policy
     */
    static Optional<ProcessHandle> parent(long pid) {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new RuntimePermission("manageProcess"));
        }
        long ppid = parent0(pid);
        if (ppid <= 0) {
            return Optional.empty();
        }
        return get(ppid);
    }

    /**
     * Returns the parent of the native pid argument.
     *
     * @return the parent of the native pid, if any; otherwise -1
     */
    private static native long parent0(long pid);

    /**
     * Returns the number of pids filled into the array.
     * @param pid if {@code pid} equals zero, then all known processes are returned;
     *        otherwise only direct child process pids are returned
     * @param pids an allocated long array to receive the pids
     * @param ppids an allocated long array to receive the parent pids; may be null
     * @return the number of pids, if greater than or equal to zero;
     *         if greater than the length of the arrays, the arrays are too small
     */
    private static native int getProcessPids0(long pid, long[] pids, long[] ppids);

    /**
     * Destroys the process with the given pid.
     * @param pid the process ID to destroy
     * @param force {@code true} if the process should be terminated forcibly;
     *        else {@code false} for a normal termination
     */
    static void destroyProcess(long pid, boolean force) {
        destroy0(pid, force);
    }

    private static native boolean destroy0(long pid, boolean forcibly);

    @Override
    public boolean destroy() {
        if (this.equals(current)) {
            throw new IllegalStateException("destroy of current process not allowed");
        }
        return destroy0(getPid(), false);
    }

    @Override
    public boolean destroyForcibly() {
        if (this.equals(current)) {
            throw new IllegalStateException("destroy of current process not allowed");
        }
        return destroy0(getPid(), true);
    }

    @Override
    public boolean supportsNormalTermination() {
        return ProcessImpl.SUPPORTS_NORMAL_TERMINATION;
    }

    /**
     * Tests whether the process represented by this {@code ProcessHandle} is alive.
     *
     * @return {@code true} if the process represented by this
     *         {@code ProcessHandle} object has not yet terminated.
     * @since 1.9
     */
    @Override
    public boolean isAlive() {
        return isAlive0(pid);
    }

    /**
     * Returns true or false depending on whether the pid is alive.
     * This must not reap the exitValue like the isAlive method above.
     *
     * @param pid the pid to check
     * @return true or false
     */
    private static native boolean isAlive0(long pid);

    @Override
    public Optional<ProcessHandle> parent() {
        return parent(pid);
    }

    @Override
    public Stream<ProcessHandle> children() {
        return children(pid);
    }

    /**
     * Returns a Stream of the children of a process or all processes.
     *
     * @param pid the pid of the process for which to find the children;
     *        0 for all processes
     * @return a stream of ProcessHandles
     */
    static Stream<ProcessHandle> children(long pid) {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new RuntimePermission("manageProcess"));
        }
        int size = 100;
        long[] childpids = null;
        while (childpids == null || size > childpids.length) {
            childpids = new long[size];
            size = getProcessPids0(pid, childpids, null);
        }
        return Arrays.stream(childpids, 0, size).mapToObj((id) -> new ProcessHandleImpl(id));
    }

    @Override
    public Stream<ProcessHandle> allChildren() {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new RuntimePermission("manageProcess"));
        }
        int size = 100;
        long[] pids = null;
        long[] ppids = null;
        while (pids == null || size > pids.length) {
            pids = new long[size];
            ppids = new long[size];
            size = getProcessPids0(0, pids, ppids);
        }

        int next = 0;       // index of next process to check
        int count = -1;     // count of subprocesses scanned
        long ppid = pid;    // start looking for this parent
        do {
            // Scan from next to size looking for ppid;
            // if found, exchange it to index next
            for (int i = next; i < size; i++) {
                if (ppids[i] == ppid) {
                    swap(pids, i, next);
                    swap(ppids, i, next);
                    next++;
                }
            }
            ppid = pids[++count];   // pick up the next pid to scan for
        } while (count < next);

        return Arrays.stream(pids, 0, count).mapToObj((id) -> new ProcessHandleImpl(id));
    }
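
An editorial aside, not part of this changeset: the scan-and-swap loop in allChildren() is a breadth-first traversal over the parallel pids/ppids arrays, compacting each newly found descendant into the prefix of the arrays and then scanning for that descendant's own children. A minimal standalone sketch of the same idea, using a small hypothetical process table, is:

    import java.util.Arrays;

    public class AllChildrenSketch {
        // Hypothetical snapshot: 2 and 3 are children of 1, 4 is a child of 2,
        // and 5 is unrelated (its parent is 9).
        static long[] pids  = {5, 4, 3, 2, 1};
        static long[] ppids = {9, 2, 1, 1, 0};

        public static void main(String[] args) {
            int size = pids.length;
            int next = 0;      // next free slot in the "found descendants" prefix
            int count = -1;    // descendants whose own children have been scanned
            long ppid = 1;     // collect all descendants of pid 1
            do {
                for (int i = next; i < size; i++) {
                    if (ppids[i] == ppid) {      // found a child of ppid
                        swap(pids, i, next);     // move it into the prefix
                        swap(ppids, i, next);
                        next++;
                    }
                }
                ppid = pids[++count];            // scan for children of the next find
            } while (count < next);
            // Descendants of 1 are pids[0..count): {2, 3, 4} in some order.
            System.out.println(Arrays.toString(Arrays.copyOf(pids, count)));
        }

        static void swap(long[] a, int x, int y) {
            long t = a[x]; a[x] = a[y]; a[y] = t;
        }
    }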

    // Swap two elements in an array
    private static void swap(long[] array, int x, int y) {
        long v = array[x];
        array[x] = array[y];
        array[y] = v;
    }

    @Override
    public ProcessHandle.Info info() {
        return ProcessHandleImpl.Info.info(pid);
    }

    @Override
    public int compareTo(ProcessHandle other) {
        return Long.compare(pid, ((ProcessHandleImpl) other).pid);
    }

    @Override
    public String toString() {
        return Long.toString(pid);
    }

    @Override
    public int hashCode() {
        return Long.hashCode(pid);
    }

    @Override
    public boolean equals(Object obj) {
        return (obj instanceof ProcessHandleImpl) &&
                (pid == ((ProcessHandleImpl) obj).pid);
    }

    /**
     * Implementation of ProcessHandle.Info.
     * Information snapshot about a process.
     * The attributes of a process vary by operating system and are not available
     * in all implementations. Additionally, information about other processes
     * is limited by the operating system privileges of the process making the request.
     * If a value is not available, either a {@code null} or {@code -1} is stored.
     * The accessor methods return an empty {@code Optional} if the value is not available.
     */
    static class Info implements ProcessHandle.Info {
        static {
            initIDs();
        }

        /**
         * Initialization of JNI fieldIDs.
         */
        private static native void initIDs();

        /**
         * Fill in this Info instance with information about the native process.
         * If values are not available, the native code does not modify the field.
         * @param pid the pid of the native process
         */
        private native void info0(long pid);

        String command;
        String[] arguments;
        long startTime;
        long totalTime;
        String user;

        Info() {
            command = null;
            arguments = null;
            startTime = -1L;
            totalTime = -1L;
            user = null;
        }

        /**
         * Returns the Info object with the fields from the process.
         * Whatever fields are provided by the native code are returned.
         *
         * @param pid the native process identifier
         * @return ProcessHandle.Info non-null; individual fields may be null
         *         or -1 if not available.
         */
        public static ProcessHandle.Info info(long pid) {
            Info info = new Info();
            info.info0(pid);
            return info;
        }

        @Override
        public Optional<String> command() {
            return Optional.ofNullable(command);
        }

        @Override
        public Optional<String[]> arguments() {
            return Optional.ofNullable(arguments);
        }

        @Override
        public Optional<Instant> startInstant() {
            return (startTime > 0)
                    ? Optional.of(Instant.ofEpochMilli(startTime))
                    : Optional.empty();
        }

        @Override
        public Optional<Duration> totalCpuDuration() {
            return (totalTime != -1)
                    ? Optional.of(Duration.ofNanos(totalTime))
                    : Optional.empty();
        }

        @Override
        public Optional<String> user() {
            return Optional.ofNullable(user);
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder(60);
            sb.append('[');
            if (user != null) {
                sb.append("user: ");
                sb.append(user());
            }
            if (command != null) {
                if (sb.length() != 0) sb.append(", ");
                sb.append("cmd: ");
                sb.append(command);
            }
            if (arguments != null && arguments.length > 0) {
                if (sb.length() != 0) sb.append(", ");
                sb.append("args: ");
                sb.append(Arrays.toString(arguments));
            }
            if (startTime != -1) {
                if (sb.length() != 0) sb.append(", ");
                sb.append("startTime: ");
                sb.append(startInstant());
            }
            if (totalTime != -1) {
                if (sb.length() != 0) sb.append(", ");
                sb.append("totalTime: ");
                sb.append(totalCpuDuration().toString());
            }
            sb.append(']');
            return sb.toString();
        }
    }
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -333,6 +333,12 @@ import java.util.StringTokenizer;
 * "../../../technotes/guides/plugin/developer_guide/rsa_how.html#use">
 * usePolicy Permission</a>.</td>
 * </tr>
 * <tr>
 * <td>manageProcess</td>
 * <td>Native process termination and information about processes;
 *     see {@link ProcessHandle}.</td>
 * <td>Allows code to identify and terminate processes that it did not create.</td>
 * </tr>
 *
 * <tr>
 * <td>localeServiceProvider</td>
@ -2247,8 +2247,29 @@ public final class String
     * @since 1.5
     */
    public String replace(CharSequence target, CharSequence replacement) {
        return Pattern.compile(target.toString(), Pattern.LITERAL).matcher(
                this).replaceAll(Matcher.quoteReplacement(replacement.toString()));
        String starget = target.toString();
        String srepl = replacement.toString();
        int j = indexOf(starget);
        if (j < 0) {
            return this;
        }
        int targLen = starget.length();
        int targLen1 = Math.max(targLen, 1);
        final char[] value = this.value;
        final char[] replValue = srepl.value;
        int newLenHint = value.length - targLen + replValue.length;
        if (newLenHint < 0) {
            throw new OutOfMemoryError();
        }
        StringBuilder sb = new StringBuilder(newLenHint);
        int i = 0;
        do {
            sb.append(value, i, j - i)
                    .append(replValue);
            i = j + targLen;
        } while (j < value.length && (j = indexOf(starget, j + targLen1)) > 0);

        return sb.append(value, i, value.length - i).toString();
    }
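
An editorial illustration, not part of this changeset: regardless of the implementation strategy above, String.replace(CharSequence, CharSequence) performs literal, left-to-right replacement of every occurrence. A few sample calls and their expected results:

    public class ReplaceSketch {
        public static void main(String[] args) {
            // Literal matching: "." is not treated as a regex metacharacter.
            System.out.println("a.b.c".replace(".", "-"));   // a-b-c
            // Replacement proceeds from the beginning to the end of the string;
            // after replacing the first "aa", scanning resumes past the replacement.
            System.out.println("aaa".replace("aa", "b"));    // ba
            // No occurrence: the original string is returned unchanged.
            System.out.println("abc".replace("xyz", "!"));   // abc
        }
    }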

    /**
@ -312,11 +312,16 @@ abstract class AbstractPlainSocketImpl extends SocketImpl
            ret = socketGetOption(opt, null);
            return ret;
        case IP_TOS:
            ret = socketGetOption(opt, null);
            if (ret == -1) { // ipv6 tos
                return trafficClass;
            } else {
                return ret;
            try {
                ret = socketGetOption(opt, null);
                if (ret == -1) { // ipv6 tos
                    return trafficClass;
                } else {
                    return ret;
                }
            } catch (SocketException se) {
                // TODO - should make better effort to read TOS or TCLASS
                return trafficClass; // ipv6 tos
            }
        case SO_KEEPALIVE:
            ret = socketGetOption(opt, null);
Some files were not shown because too many files have changed in this diff