Merge commit cc04ffc7b3
@@ -54,15 +54,4 @@ $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/module/ModuleLoaderMap.java:

GENSRC_JAVA_BASE += $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/module/ModuleLoaderMap.java

$(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat: \
    $(TOPDIR)/src/java.base/share/classes/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat \
    $(VARDEPS_FILE) $(BUILD_TOOLS_JDK)
	$(MKDIR) -p $(@D)
	$(RM) $@ $@.tmp
	$(TOOL_GENCLASSLOADERMAP) -boot $(BOOT_MODULES_LIST) \
	    -platform $(PLATFORM_MODULES_LIST) -o $@.tmp $<
	$(MV) $@.tmp $@

GENSRC_JAVA_BASE += $(SUPPORT_OUTPUTDIR)/gensrc/java.base/jdk/internal/vm/cds/resources/ModuleLoaderMap.dat

################################################################################
@@ -77,30 +77,22 @@ public class GenModuleLoaderMap {
throw new IllegalArgumentException(source + " not exist");
}

boolean needsQuotes = outfile.toString().contains(".java.tmp");

try (BufferedWriter bw = Files.newBufferedWriter(outfile, StandardCharsets.UTF_8);
PrintWriter writer = new PrintWriter(bw)) {
for (String line : Files.readAllLines(source)) {
if (line.contains("@@BOOT_MODULE_NAMES@@")) {
line = patch(line, "@@BOOT_MODULE_NAMES@@", bootModules, needsQuotes);
line = patch(line, "@@BOOT_MODULE_NAMES@@", bootModules);
} else if (line.contains("@@PLATFORM_MODULE_NAMES@@")) {
line = patch(line, "@@PLATFORM_MODULE_NAMES@@", platformModules, needsQuotes);
line = patch(line, "@@PLATFORM_MODULE_NAMES@@", platformModules);
}
writer.println(line);
}
}
}

private static String patch(String s, String tag, Stream<String> stream, boolean needsQuotes) {
String mns = null;
if (needsQuotes) {
mns = stream.sorted()
.collect(Collectors.joining("\",\n \""));
} else {
mns = stream.sorted()
.collect(Collectors.joining("\n"));
}
private static String patch(String s, String tag, Stream<String> stream) {
String mns = stream.sorted()
.collect(Collectors.joining("\",\n \""));
return s.replace(tag, mns);
}
@@ -2840,6 +2840,44 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
bind(L_done);
}

// Code for BigInteger::mulAdd intrinsic
// out = r0
// in = r1
// offset = r2 (already out.length-offset)
// len = r3
// k = r4
//
// Pseudo code from the Java implementation:
// carry = 0;
// offset = out.length-offset - 1;
// for (int j=len-1; j >= 0; j--) {
// product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
// out[offset--] = (int)product;
// carry = product >>> 32;
// }
// return (int)carry;
void MacroAssembler::mul_add(Register out, Register in, Register offset,
Register len, Register k) {
Label LOOP, END;
// pre-loop
cmp(len, zr); // cmp, not cbz/cbnz: to use the condition twice => fewer branches
csel(out, zr, out, Assembler::EQ);
br(Assembler::EQ, END);
add(in, in, len, LSL, 2); // in[j+1] address
add(offset, out, offset, LSL, 2); // out[offset + 1] address
mov(out, zr); // used to keep carry now
BIND(LOOP);
ldrw(rscratch1, Address(pre(in, -4)));
madd(rscratch1, rscratch1, k, out);
ldrw(rscratch2, Address(pre(offset, -4)));
add(rscratch1, rscratch1, rscratch2);
strw(rscratch1, Address(offset));
lsr(out, rscratch1, 32);
subs(len, len, 1);
br(Assembler::NE, LOOP);
BIND(END);
}
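As a cross-check, the loop above can be modeled in plain C++ (a sketch for illustration only; the function name and the unsigned 64-bit carry handling are assumptions that mirror the pseudocode, not HotSpot source):

    // Reference model of the stub's contract: 32-bit words, with offset
    // arriving as out.length - offset (see the register comments above).
    static int mul_add_ref(unsigned int* out, const unsigned int* in,
                           int offset, int len, unsigned int k) {
      unsigned long long carry = 0;
      offset = offset - 1;                     // index of last word to update
      for (int j = len - 1; j >= 0; j--) {
        unsigned long long product =
            (unsigned long long)in[j] * k + out[offset] + carry;
        out[offset--] = (unsigned int)product; // low 32 bits
        carry = product >> 32;                 // high 32 bits propagate
      }
      return (int)carry;
    }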
/**
* Emits code to update CRC-32 with a byte value according to constants in table
*
@@ -1265,6 +1265,7 @@ public:
void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
Register zlen, Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register tmp5, Register tmp6, Register tmp7);
void mul_add(Register out, Register in, Register offs, Register len, Register k);
// ISB may be needed because of a safepoint
void maybe_isb() { isb(); }
@@ -3607,6 +3607,63 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

address generate_squareToLen() {
// The squareToLen algorithm for sizes 1..127 described in the Java code runs
// faster than multiply_to_len on some CPUs and slower on others, but
// multiply_to_len shows slightly better overall results.
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "squareToLen");
address start = __ pc();

const Register x = r0;
const Register xlen = r1;
const Register z = r2;
const Register zlen = r3;
const Register y = r4; // == x
const Register ylen = r5; // == xlen

const Register tmp1 = r10;
const Register tmp2 = r11;
const Register tmp3 = r12;
const Register tmp4 = r13;
const Register tmp5 = r14;
const Register tmp6 = r15;
const Register tmp7 = r16;

RegSet spilled_regs = RegSet::of(y, ylen);
BLOCK_COMMENT("Entry:");
__ enter();
__ push(spilled_regs, sp);
__ mov(y, x);
__ mov(ylen, xlen);
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ pop(spilled_regs, sp);
__ leave();
__ ret(lr);
return start;
}

address generate_mulAdd() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "mulAdd");

address start = __ pc();

const Register out = r0;
const Register in = r1;
const Register offset = r2;
const Register len = r3;
const Register k = r4;

BLOCK_COMMENT("Entry:");
__ enter();
__ mul_add(out, in, offset, len, k);
__ leave();
__ ret(lr);

return start;
}

void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
FloatRegister a, FloatRegister b, FloatRegister a1_xor_a0,
FloatRegister tmp1, FloatRegister tmp2, FloatRegister tmp3, FloatRegister tmp4) {
@@ -4913,6 +4970,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_multiplyToLen = generate_multiplyToLen();
}

if (UseSquareToLenIntrinsic) {
StubRoutines::_squareToLen = generate_squareToLen();
}

if (UseMulAddIntrinsic) {
StubRoutines::_mulAdd = generate_mulAdd();
}

if (UseMontgomeryMultiplyIntrinsic) {
StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply");
MontgomeryMultiplyGenerator g(_masm, /*squaring*/false);
@@ -340,6 +340,14 @@ void VM_Version::get_processor_features() {
UseMultiplyToLenIntrinsic = true;
}

if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
UseSquareToLenIntrinsic = true;
}

if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
UseMulAddIntrinsic = true;
}

if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
}
@@ -2175,7 +2175,8 @@ class Assembler : public AbstractAssembler {
inline void vsbox( VectorRegister d, VectorRegister a);

// SHA (introduced with Power 8)
// Not yet implemented.
inline void vshasigmad(VectorRegister d, VectorRegister a, bool st, int six);
inline void vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six);

// Vector Binary Polynomial Multiplication (introduced with Power 8)
inline void vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b);
@@ -2286,6 +2287,10 @@ class Assembler : public AbstractAssembler {
inline void lvsl( VectorRegister d, Register s2);
inline void lvsr( VectorRegister d, Register s2);

// Endianness-specific concatenation of 2 loaded vectors.
inline void load_perm(VectorRegister perm, Register addr);
inline void vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm);

// RegisterOrConstant versions.
// These emitters choose between the versions using two registers and
// those with register and immediate, depending on the content of roc.
@@ -926,7 +926,8 @@ inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRe
inline void Assembler::vsbox( VectorRegister d, VectorRegister a) { emit_int32( VSBOX_OPCODE | vrt(d) | vra(a) ); }

// SHA (introduced with Power 8)
// Not yet implemented.
inline void Assembler::vshasigmad(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAD_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }
inline void Assembler::vshasigmaw(VectorRegister d, VectorRegister a, bool st, int six) { emit_int32( VSHASIGMAW_OPCODE | vrt(d) | vra(a) | vst(st) | vsix(six)); }

// Vector Binary Polynomial Multiplication (introduced with Power 8)
inline void Assembler::vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
@@ -1035,6 +1036,22 @@ inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL
inline void Assembler::lvsl( VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvsr( VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | rb(s2)); }

inline void Assembler::load_perm(VectorRegister perm, Register addr) {
#if defined(VM_LITTLE_ENDIAN)
lvsr(perm, addr);
#else
lvsl(perm, addr);
#endif
}

inline void Assembler::vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm) {
#if defined(VM_LITTLE_ENDIAN)
vperm(first_dest, second, first_dest, perm);
#else
vperm(first_dest, first_dest, second, perm);
#endif
}

inline void Assembler::load_const(Register d, void* x, Register tmp) {
load_const(d, (long)x, tmp);
}
@@ -866,6 +866,40 @@ class MacroAssembler: public Assembler {
void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
bool invertCRC);

// SHA-2 auxiliary functions and public interfaces
private:
void sha256_deque(const VectorRegister src,
const VectorRegister dst1, const VectorRegister dst2, const VectorRegister dst3);
void sha256_load_h_vec(const VectorRegister a, const VectorRegister e, const Register hptr);
void sha256_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
void sha256_load_w_plus_k_vec(const Register buf_in, const VectorRegister* ws,
const int total_ws, const Register k, const VectorRegister* kpws,
const int total_kpws);
void sha256_calc_4w(const VectorRegister w0, const VectorRegister w1,
const VectorRegister w2, const VectorRegister w3, const VectorRegister kpw0,
const VectorRegister kpw1, const VectorRegister kpw2, const VectorRegister kpw3,
const Register j, const Register k);
void sha256_update_sha_state(const VectorRegister a, const VectorRegister b,
const VectorRegister c, const VectorRegister d, const VectorRegister e,
const VectorRegister f, const VectorRegister g, const VectorRegister h,
const Register hptr);

void sha512_load_w_vec(const Register buf_in, const VectorRegister* ws, const int total_ws);
void sha512_update_sha_state(const Register state, const VectorRegister* hs, const int total_hs);
void sha512_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
void sha512_load_h_vec(const Register state, const VectorRegister* hs, const int total_hs);
void sha512_calc_2w(const VectorRegister w0, const VectorRegister w1,
const VectorRegister w2, const VectorRegister w3,
const VectorRegister w4, const VectorRegister w5,
const VectorRegister w6, const VectorRegister w7,
const VectorRegister kpw0, const VectorRegister kpw1, const Register j,
const VectorRegister vRb, const Register k);

public:
void sha256(bool multi_block);
void sha512(bool multi_block);


//
// Debugging
//
src/hotspot/cpu/ppc/macroAssembler_ppc_sha.cpp (new file, 1136 lines)
File diff suppressed because it is too large.
@@ -3095,6 +3095,28 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

address generate_sha256_implCompress(bool multi_block, const char *name) {
assert(UseSHA, "need SHA instructions");
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();

__ sha256 (multi_block);

__ blr();
return start;
}

address generate_sha512_implCompress(bool multi_block, const char *name) {
assert(UseSHA, "need SHA instructions");
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();

__ sha512 (multi_block);

__ blr();
return start;
}

void generate_arraycopy_stubs() {
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
@@ -3781,6 +3803,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
}

if (UseSHA256Intrinsics) {
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
}
if (UseSHA512Intrinsics) {
StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
}
}

public:
@@ -34,7 +34,7 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_

enum platform_dependent_constants {
code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 20000 // simply increase if too small (assembler will crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
};

// CRC32 Intrinsics.
@@ -113,7 +113,7 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
"ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_fsqrt() ? " fsqrt" : ""),
(has_isel() ? " isel" : ""),
(has_lxarxeh() ? " lxarxeh" : ""),
@@ -130,7 +130,8 @@ void VM_Version::initialize() {
(has_mfdscr() ? " mfdscr" : ""),
(has_vsx() ? " vsx" : ""),
(has_ldbrx() ? " ldbrx" : ""),
(has_stdbrx() ? " stdbrx" : "")
(has_stdbrx() ? " stdbrx" : ""),
(has_vshasig() ? " sha" : "")
// Make sure number of %s matches num_features!
);
_features_string = os::strdup(buf);
@@ -247,17 +248,43 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseFMA, true);
}

if (UseSHA) {
warning("SHA instructions are not available on this CPU");
if (has_vshasig()) {
if (FLAG_IS_DEFAULT(UseSHA)) {
UseSHA = true;
}
} else if (UseSHA) {
if (!FLAG_IS_DEFAULT(UseSHA))
warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
warning("SHA intrinsics are not available on this CPU");

if (UseSHA1Intrinsics) {
warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
}

if (UseSHA && has_vshasig()) {
if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
}
} else if (UseSHA256Intrinsics) {
warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
}

if (UseSHA && has_vshasig()) {
if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
}
} else if (UseSHA512Intrinsics) {
warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}

if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}

if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
UseSquareToLenIntrinsic = true;
}
@@ -663,6 +690,7 @@ void VM_Version::determine_features() {
a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> ldbrx
a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
a->vshasigmaw(VR0, VR1, 1, 0xF); // code[17] -> vshasig
a->blr();

// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
@@ -714,6 +742,7 @@ void VM_Version::determine_features() {
if (code[feature_cntr++]) features |= vsx_m;
if (code[feature_cntr++]) features |= ldbrx_m;
if (code[feature_cntr++]) features |= stdbrx_m;
if (code[feature_cntr++]) features |= vshasig_m;

// Print the detection code.
if (PrintAssembly) {
@@ -49,6 +49,7 @@ protected:
vsx,
ldbrx,
stdbrx,
vshasig,
num_features // last entry to count features
};
enum Feature_Flag_Set {
@@ -64,6 +65,7 @@ protected:
vand_m = (1 << vand ),
lqarx_m = (1 << lqarx ),
vcipher_m = (1 << vcipher),
vshasig_m = (1 << vshasig),
vpmsumb_m = (1 << vpmsumb),
tcheck_m = (1 << tcheck ),
mfdscr_m = (1 << mfdscr ),
@@ -106,6 +108,7 @@ public:
static bool has_vsx() { return (_features & vsx_m) != 0; }
static bool has_ldbrx() { return (_features & ldbrx_m) != 0; }
static bool has_stdbrx() { return (_features & stdbrx_m) != 0; }
static bool has_vshasig() { return (_features & vshasig_m) != 0; }
static bool has_mtfprd() { return has_vpmsumb(); } // alias for P8

// Assembler testing
@@ -148,13 +148,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result;
}


inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).

unsigned int old_value;
T old_value;
const uint64_t zero = 0;

__asm__ __volatile__ (
@@ -182,15 +184,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory"
);

return (jint) old_value;
return old_value;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).

long old_value;
T old_value;
const uint64_t zero = 0;

__asm__ __volatile__ (
@@ -218,11 +223,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory"
);

return (intptr_t) old_value;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old_value;
}

inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
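The shape change in this hunk repeats across every platform file below: the old jint/intptr_t Atomic::xchg overloads become specializations of a size-keyed PlatformXchg function object. A minimal sketch of the presumed dispatch (simplified; the shared front end lives in runtime/atomic.hpp and is not part of this diff, so the signatures here are assumptions):

    // Sketch only: one generic front end, one function object per operand size.
    template<size_t byte_size>
    struct PlatformXchg {
      template<typename T>
      T operator()(T exchange_value, T volatile* dest) const;  // per platform
    };

    template<typename T>
    inline T xchg(T exchange_value, T volatile* dest) {
      // Each platform supplies PlatformXchg<4> / PlatformXchg<8> with the
      // right instruction sequence and fences; callers no longer pick an
      // overload by argument type.
      return PlatformXchg<sizeof(T)>()(exchange_value, dest);
    }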
@@ -61,7 +61,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@@ -69,10 +73,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -118,7 +118,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@@ -144,10 +148,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

extern "C" {
// defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
@@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {

/* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until success.
int prev = *ptr;
@@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {

/* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
@@ -207,18 +207,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value);
T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added
@@ -229,24 +233,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
volatile intptr_t* dest) {
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize();
return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}

// No direct support for cmpxchg of bytes; emulate using int.
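The parameter reorder in m68k_lock_test_and_set and arm_lock_test_and_set above exists so those functions can be passed to xchg_using_helper, which supplies arguments in (exchange_value, dest) order. A guess at the helper's shape (an assumption; it is defined in shared code outside this diff):

    // Assumed adapter: cast to the stub's operand type, call the free
    // function, cast the previous value back to the caller's T.
    template<typename StubType, typename T>
    inline T xchg_using_helper(StubType (*fn)(StubType, StubType volatile*),
                               T exchange_value, T volatile* dest) {
      STATIC_ASSERT(sizeof(StubType) == sizeof(T));
      return PrimitiveConversions::cast<T>(
          fn(PrimitiveConversions::cast<StubType>(exchange_value),
             reinterpret_cast<StubType volatile*>(dest)));
    }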
@@ -57,19 +57,16 @@ struct Atomic::PlatformAdd
}
};

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
{
jint res = __sync_lock_test_and_set (dest, exchange_value);
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __sync_lock_test_and_set(dest, exchange_value);
FULL_MEM_BARRIER;
return res;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
{
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
@@ -90,13 +87,6 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
{
intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
FULL_MEM_BARRIER;
return res;
}

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
@@ -141,11 +141,15 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
: "memory");
return val;
}
#endif // AARCH64
#endif

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
jint old_val;
T old_val;
int tmp;
__asm__ volatile(
"1:\n\t"
@@ -157,13 +161,17 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
: "memory");
return old_val;
#else
return (*os::atomic_xchg_func)(exchange_value, dest);
return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
#endif
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
intptr_t old_val;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_val;
int tmp;
__asm__ volatile(
"1:\n\t"
@@ -174,14 +182,8 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
: [new_val] "r" (exchange_value), [dest] "r" (dest)
: "memory");
return old_val;
#else
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
#endif // AARCH64

// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering
@@ -146,12 +146,14 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return result;
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).

unsigned int old_value;
T old_value;
const uint64_t zero = 0;

__asm__ __volatile__ (
@@ -179,15 +181,18 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
"memory"
);

return (jint) old_value;
return old_value;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg_ptr doesn't necessarily do an acquire
// (see synchronizer.cpp).

long old_value;
T old_value;
const uint64_t zero = 0;

__asm__ __volatile__ (
@@ -215,11 +220,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
"memory"
);

return (intptr_t) old_value;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old_value;
}

inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
@@ -208,8 +208,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
//
// The return value is the (unchanged) value from memory as it was when the
// replacement succeeded.
inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
unsigned int old;
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T old;

__asm__ __volatile__ (
" LLGF %[old],%[mem] \n\t" // get old value
@@ -219,16 +223,20 @@ inline jint Atomic::xchg (jint xchg_val, volatile jint* dest) {
: [old] "=&d" (old) // write-only, prev value irrelevant
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory
: [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >---
: "cc", "memory"
);

return (jint)old;
return old;
}

inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
unsigned long old;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old;

__asm__ __volatile__ (
" LG %[old],%[mem] \n\t" // get old value
@@ -238,16 +246,12 @@ inline intptr_t Atomic::xchg_ptr(intptr_t xchg_val, volatile intptr_t* dest) {
: [old] "=&d" (old) // write-only, init from memory
, [mem] "+Q" (*dest) // read/write, memory to be updated atomically
//---< inputs >---
: [upd] "d" (xchg_val) // read-only, value to be written to memory
: [upd] "d" (exchange_value) // read-only, value to be written to memory
//---< clobbered >---
: "cc", "memory"
);

return (intptr_t)old;
}

inline void *Atomic::xchg_ptr(void *exchange_value, volatile void *dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
return old;
}

//----------------
@@ -95,9 +95,12 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return rv;
}


inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
intptr_t rv = exchange_value;
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
T rv = exchange_value;
__asm__ volatile(
" swap [%2],%1\n\t"
: "=r" (rv)
@@ -106,8 +109,12 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return rv;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
intptr_t rv = exchange_value;
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T rv = exchange_value;
__asm__ volatile(
"1:\n\t"
" mov %1, %%o3\n\t"
@@ -123,10 +130,6 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
return rv;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
@@ -61,7 +61,11 @@ inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@@ -69,10 +73,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
return exchange_value;
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
@@ -118,7 +118,11 @@ inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) co
return old_value;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
@@ -144,10 +148,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

extern "C" {
// defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
@@ -87,7 +87,7 @@ static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {

/* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until success.
int prev = *ptr;
@@ -148,7 +148,7 @@ static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {

/* Atomically write VALUE into `*PTR' and returns the previous
contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.
int prev = *ptr;
@@ -201,18 +201,22 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return __sync_add_and_fetch(dest, add_value);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
// limitation that the only valid value to store is the immediate
// constant 1. There is a test for this in JNI_CreateJavaVM().
jint result = __sync_lock_test_and_set (dest, exchange_value);
T result = __sync_lock_test_and_set (dest, exchange_value);
// All atomic operations are expected to be full memory barriers
// (see atomic.hpp). However, __sync_lock_test_and_set is not
// a full memory barrier, but an acquire barrier. Hence, this added
@@ -223,24 +227,14 @@ inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
volatile intptr_t* dest) {
#ifdef ARM
return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
return m68k_lock_test_and_set(dest, exchange_value);
#else
intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
__sync_synchronize();
return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *) xchg_ptr((intptr_t) exchange_value,
(volatile intptr_t*) dest);
}

// No direct support for cmpxchg of bytes; emulate using int.
@@ -43,16 +43,6 @@ inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value;
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
inline jlong Atomic::load(const volatile jlong* src) { return *src; }


// This is the interface to the atomic instructions in solaris_sparc.il.
// It's very messy because we need to support v8 and these instructions
// are illegal there. When sparc v8 is dropped, we can drop out lots of
// this code. Also compiler2 does not support v8 so the conditional code
// omits the instruction set check.

extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest);
extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);

// Implement ADD using a CAS loop.
template<size_t byte_size>
struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
@@ -69,16 +59,30 @@ struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
}
};

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_swap32(exchange_value, dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "swap [%2],%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)
: "memory");
return exchange_value;
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return _Atomic_swap64(exchange_value, dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
T old_value = *dest;
while (true) {
T result = cmpxchg(exchange_value, dest, old_value);
if (result == old_value) break;
old_value = result;
}
return old_value;
}

// No direct support for cmpxchg of bytes; emulate using int.
@@ -32,47 +32,6 @@
.end


// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest

.inline _Atomic_swap32, 2
.volatile
swap [%o1],%o0
.nonvolatile
.end


// Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
//
// 64-bit
//
// Arguments:
// exchange_value: O0
// dest: O1
//
// Results:
// O0: the value previously stored in dest

.inline _Atomic_swap64, 2
.volatile
1:
mov %o0, %o3
ldx [%o1], %o2
casx [%o1], %o2, %o3
cmp %o2, %o3
bne %xcc, 1b
nop
mov %o2, %o0
.nonvolatile
.end


// Support for jlong Atomic::load and Atomic::store on v9.
//
// void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
@@ -84,8 +84,26 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
reinterpret_cast<jlong volatile*>(dest)));
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_xchg(exchange_value, dest);
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
reinterpret_cast<jint volatile*>(dest)));
}

extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
_Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
reinterpret_cast<jlong volatile*>(dest)));
}

// Not using cmpxchg_using_helper here, because some configurations of
@@ -135,16 +153,6 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,

inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
@@ -81,17 +81,19 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return (jint)(*os::atomic_xchg_func)(exchange_value, dest);
}
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
template<> \
template<typename T> \
inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
T volatile* dest) const { \
STATIC_ASSERT(ByteSize == sizeof(T)); \
return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest);
}
DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest);
}
#undef DEFINE_STUB_XCHG

#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
template<> \
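For readers unfamiliar with this macro style, writing out DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func) by hand using the macro body above gives (illustrative expansion, not generated output):

    template<>
    template<typename T>
    inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                                 T volatile* dest) const {
      STATIC_ASSERT(4 == sizeof(T));
      return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
    }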
@@ -128,7 +130,11 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
}
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
T volatile* dest) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedExchange
__asm {
mov eax, exchange_value;
@@ -137,14 +143,6 @@ inline jint Atomic::xchg (jint exchange_value, volatile jint* des
}
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
|
@ -802,7 +802,6 @@ void ClassLoader::setup_search_path(const char *class_path, bool bootstrap_searc
|
||||
if (DumpSharedSpaces) {
|
||||
JImageFile *jimage = _jrt_entry->jimage();
|
||||
assert(jimage != NULL, "No java runtime image file present");
|
||||
ClassLoader::initialize_module_loader_map(jimage);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@@ -1144,61 +1143,6 @@ int ClassLoader::crc32(int crc, const char* buf, int len) {
return (*Crc32)(crc, (const jbyte*)buf, len);
}

#if INCLUDE_CDS
void ClassLoader::initialize_module_loader_map(JImageFile* jimage) {
if (!DumpSharedSpaces) {
return; // only needed for CDS dump time
}

ResourceMark rm;
jlong size;
JImageLocationRef location = (*JImageFindResource)(jimage, JAVA_BASE_NAME, get_jimage_version_string(), MODULE_LOADER_MAP, &size);
if (location == 0) {
vm_exit_during_initialization(
"Cannot find ModuleLoaderMap location from modules jimage.", NULL);
}
char* buffer = NEW_RESOURCE_ARRAY(char, size + 1);
buffer[size] = '\0';
jlong read = (*JImageGetResource)(jimage, location, buffer, size);
if (read != size) {
vm_exit_during_initialization(
"Cannot find ModuleLoaderMap resource from modules jimage.", NULL);
}
char* char_buf = (char*)buffer;
int buflen = (int)strlen(char_buf);
char* begin_ptr = char_buf;
char* end_ptr = strchr(begin_ptr, '\n');
bool process_boot_modules = false;
_boot_modules_array = new (ResourceObj::C_HEAP, mtModule)
GrowableArray<char*>(INITIAL_BOOT_MODULES_ARRAY_SIZE, true);
_platform_modules_array = new (ResourceObj::C_HEAP, mtModule)
GrowableArray<char*>(INITIAL_PLATFORM_MODULES_ARRAY_SIZE, true);
while (end_ptr != NULL && (end_ptr - char_buf) < buflen) {
// Allocate a buffer from the C heap to be appended to the _boot_modules_array
// or the _platform_modules_array.
char* temp_name = NEW_C_HEAP_ARRAY(char, (size_t)(end_ptr - begin_ptr + 1), mtInternal);
strncpy(temp_name, begin_ptr, end_ptr - begin_ptr);
temp_name[end_ptr - begin_ptr] = '\0';
if (strncmp(temp_name, "BOOT", 4) == 0) {
process_boot_modules = true;
FREE_C_HEAP_ARRAY(char, temp_name);
} else if (strncmp(temp_name, "PLATFORM", 8) == 0) {
process_boot_modules = false;
FREE_C_HEAP_ARRAY(char, temp_name);
} else {
// module name
if (process_boot_modules) {
_boot_modules_array->append(temp_name);
} else {
_platform_modules_array->append(temp_name);
}
}
begin_ptr = ++end_ptr;
end_ptr = strchr(begin_ptr, '\n');
}
}
#endif
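The removed parser above consumed a simple line-oriented format: a BOOT header line, boot module names one per line, then a PLATFORM header line and platform module names. An abbreviated, illustrative sample (the real generated file lists the full module sets):

    BOOT
    java.base
    PLATFORM
    java.sql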
// Function add_package extracts the package from the fully qualified class name
// and checks if the package is in the boot loader's package entry table. If so,
// then it sets the classpath_index in the package entry record.
@@ -1290,58 +1234,6 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
return result();
}

#if INCLUDE_CDS
s2 ClassLoader::module_to_classloader(const char* module_name) {

assert(DumpSharedSpaces, "dump time only");
assert(_boot_modules_array != NULL, "_boot_modules_array is NULL");
assert(_platform_modules_array != NULL, "_platform_modules_array is NULL");

int array_size = _boot_modules_array->length();
for (int i = 0; i < array_size; i++) {
if (strcmp(module_name, _boot_modules_array->at(i)) == 0) {
return BOOT_LOADER;
}
}

array_size = _platform_modules_array->length();
for (int i = 0; i < array_size; i++) {
if (strcmp(module_name, _platform_modules_array->at(i)) == 0) {
return PLATFORM_LOADER;
}
}

return APP_LOADER;
}

s2 ClassLoader::classloader_type(Symbol* class_name, ClassPathEntry* e, int classpath_index, TRAPS) {
assert(DumpSharedSpaces, "Only used for CDS dump time");

// obtain the classloader type based on the class name.
// First obtain the package name based on the class name. Then obtain
// the classloader type based on the package name from the jimage using
// a jimage API. If the classloader type cannot be found from the
// jimage, it is determined by the class path entry.
jshort loader_type = ClassLoader::APP_LOADER;
if (e->is_jrt()) {
ResourceMark rm;
TempNewSymbol pkg_name = InstanceKlass::package_from_name(class_name, CHECK_0);
if (pkg_name != NULL) {
const char* pkg_name_C_string = (const char*)(pkg_name->as_C_string());
ClassPathImageEntry* cpie = (ClassPathImageEntry*)e;
JImageFile* jimage = cpie->jimage();
char* module_name = (char*)(*JImagePackageToModule)(jimage, pkg_name_C_string);
if (module_name != NULL) {
loader_type = ClassLoader::module_to_classloader(module_name);
}
}
} else if (ClassLoaderExt::is_boot_classpath(classpath_index)) {
loader_type = ClassLoader::BOOT_LOADER;
}
return loader_type;
}
#endif

// caller needs ResourceMark
const char* ClassLoader::file_name_for_class_name(const char* class_name,
int class_name_len) {
@@ -37,13 +37,6 @@
// Name of boot "modules" image
#define MODULES_IMAGE_NAME "modules"

// Name of the resource containing mapping from module names to defining class loader type
#define MODULE_LOADER_MAP "jdk/internal/vm/cds/resources/ModuleLoaderMap.dat"

// Initial sizes of the following arrays are based on the generated ModuleLoaderMap.dat
#define INITIAL_BOOT_MODULES_ARRAY_SIZE 30
#define INITIAL_PLATFORM_MODULES_ARRAY_SIZE 15

// Class path entry (directory or zip file)

class JImageFile;
@@ -439,10 +432,6 @@ class ClassLoader: AllStatic {
static bool check_shared_paths_misc_info(void* info, int size);
static void exit_with_path_failure(const char* error, const char* message);

static s2 module_to_classloader(const char* module_name);
static void initialize_module_loader_map(JImageFile* jimage);
static s2 classloader_type(Symbol* class_name, ClassPathEntry* e,
int classpath_index, TRAPS);
static void record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream);
#endif
static JImageLocationRef jimage_find_resource(JImageFile* jf, const char* module_name,
@@ -332,7 +332,7 @@ public:
static void disable_compilation_forever() {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
}

static bool is_compilation_disabled_forever() {
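The jint(...) conversion is needed because, with the templated xchg, both arguments must deduce the same T, and shutdown_compilation is an enumerator rather than a jint. A minimal sketch of the deduction problem (declarations assumed for illustration):

    enum CompilerActivity { shutdown_compilation = 0 };  // illustrative
    static volatile jint _should_compile_new_jobs;

    // Atomic::xchg(shutdown_compilation, &_should_compile_new_jobs);
    //   -> T deduces to CompilerActivity from argument 1 but to jint from
    //      argument 2, so template argument deduction fails.
    Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);  // OK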
@@ -96,7 +96,7 @@ bool MethodMatcher::canonicalize(char * line, const char *& error_msg) {
bool have_colon = (colon != NULL);
if (have_colon) {
// Don't allow multiple '::'
if (colon + 2 != '\0') {
if (colon[2] != '\0') {
if (strstr(colon+2, "::")) {
error_msg = "Method pattern only allows one '::' allowed";
return false;
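The change from colon + 2 != '\0' to colon[2] != '\0' fixes a pointer/character mix-up: the old expression compares the pointer itself against a null pointer constant instead of testing the character after the "::". Sketched out (the surrounding declarations are assumed):

    const char* colon = strstr(line, "::");
    // Old: pointer arithmetic compared against '\0' (a null pointer constant
    // here) - true for any non-NULL colon, so the guard tested nothing useful.
    if (colon + 2 != '\0') { /* effectively always taken */ }
    // New: dereference - test the character that follows "::".
    if (colon[2] != '\0')  { /* only when something follows "::" */ }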
@@ -233,7 +233,6 @@ class MetaspaceObj {
void print_address_on(outputStream* st) const; // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
f(Unknown) \
f(Class) \
f(Symbol) \
f(TypeArrayU1) \
@ -165,7 +165,7 @@ public:
  }

  void print(size_t total_bytes) const {
    tty->print_cr("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                  _name, used(), perc(used(), total_bytes), reserved(), perc(used(), reserved()), p2i(_base));
  }
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
@ -1405,7 +1405,7 @@ void VM_PopulateDumpSharedSpace::print_region_stats() {
  print_heap_region_stats(_string_regions, "st", total_reserved);
  print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

  tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
  tty->print_cr("total    : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
                total_bytes, total_reserved, total_u_perc);
}
@ -1416,7 +1416,7 @@ void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion
    char* start = (char*)heap_mem->at(i).start();
    size_t size = heap_mem->at(i).byte_size();
    char* top = start + size;
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100%% used] at " INTPTR_FORMAT,
    tty->print_cr("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
                  name, i, size, size/double(total_size)*100.0, size, p2i(start));

  }
@ -377,6 +377,7 @@ static SpecialFlag const special_jvm_flags[] = {
  // --- Non-alias flags - sorted by obsolete_in then expired_in:
  { "MaxGCMinorPauseMillis", JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
  { "UseConcMarkSweepGC",    JDK_Version::jdk(9),  JDK_Version::undefined(), JDK_Version::undefined() },
  { "AssumeMP",              JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
  { "MonitorInUseLists",     JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
  { "MaxRAMFraction",        JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
  { "MinRAMFraction",        JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
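For reference, each entry in this table is { flag name, deprecated_in, obsolete_in, expired_in }; AssumeMP joins as deprecated in JDK 10, matching the globals.hpp default flip later in this commit. A hedged sketch of the table's semantics — the struct below is a simplified stand-in, and the field meanings are summarized from the surrounding deprecation machinery:

#include <cstdio>

struct JDKVersion { int major; };            // stand-in for JDK_Version
static const JDKVersion UNDEFINED = { -1 };

struct SpecialFlag {
  const char* name;
  JDKVersion  deprecated_in;  // still honored, but the VM warns
  JDKVersion  obsolete_in;    // accepted and ignored, with a warning
  JDKVersion  expired_in;     // the VM refuses to start with the flag
};

int main() {
  SpecialFlag assume_mp = { "AssumeMP", {10}, UNDEFINED, UNDEFINED };
  std::printf("%s: deprecated in JDK %d\n", assume_mp.name, assume_mp.deprecated_in.major);
  return 0;
}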
@ -4476,16 +4477,6 @@ jint Arguments::apply_ergo() {

  set_shared_spaces_flags();

#if defined(SPARC)
  // BIS instructions require 'membar' instruction regardless of the number
  // of CPUs because in virtualized/container environments which might use only 1
  // CPU, BIS instructions may produce incorrect results.

  if (FLAG_IS_DEFAULT(AssumeMP)) {
    FLAG_SET_DEFAULT(AssumeMP, true);
  }
#endif

  // Check the GC selections again.
  if (!check_gc_consistency()) {
    return JNI_EINVAL;
@ -116,10 +116,19 @@ class Atomic : AllStatic {
  // Performs atomic exchange of *dest with exchange_value. Returns old
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static D xchg(T exchange_value, volatile D* dest);

  inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    return xchg(exchange_value, dest);
  }

  inline static void* xchg_ptr(void* exchange_value, volatile void* dest) {
    return xchg(exchange_value, reinterpret_cast<void* volatile*>(dest));
  }

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
@ -280,6 +289,45 @@ private:
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename T, typename D, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4, and sizeof(intptr_t) are required. The class is a function
  // object that must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(exchange_value, dest)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};

template<typename From, typename To>
@ -353,6 +401,18 @@ struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
               cmpxchg_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)
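A hedged sketch of the helper-based route described in the comments above: a platform supplies an out-of-line, non-template function and adapts it through xchg_using_helper. The helper name os_xchg32 is invented, and plain casts stand in for PrimitiveConversions::cast:

#include <cstdint>
#include <cstdio>

// Out-of-line, non-template helper such as a platform might provide
// (invented name; the real thing would wrap an atomic instruction).
int32_t os_xchg32(int32_t exchange_value, int32_t volatile* dest) {
  int32_t old = *dest;   // non-atomic stand-in
  *dest = exchange_value;
  return old;
}

// Same shape as Atomic::xchg_using_helper: translate the generic arguments
// to the helper's types, call it, and translate the result back.
template<typename Type, typename Fn, typename T>
T xchg_using_helper(Fn fn, T exchange_value, T volatile* dest) {
  static_assert(sizeof(Type) == sizeof(T), "size mismatch");
  return (T)fn((Type)exchange_value, reinterpret_cast<Type volatile*>(dest));
}

int main() {
  volatile unsigned value = 5u;
  unsigned old = xchg_using_helper<int32_t>(os_xchg32, 7u, &value);
  std::printf("old=%u new=%u\n", old, value);
  return 0;
}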
@ -594,9 +654,75 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(exchange_value, dest);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D>
struct Atomic::XchgImpl<
  T*, D*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D* operator()(T* exchange_value, D* volatile* dest) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(new_value, dest);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(Translator::decay(exchange_value),
           reinterpret_cast<Decayed volatile*>(dest)));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T exchange_value,
                                   T volatile* dest) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest) {
  return XchgImpl<T, D>()(exchange_value, dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
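The dispatch pattern above, condensed into a self-contained sketch: std::enable_if and std::is_integral stand in for HotSpot's EnableIf/IsIntegral, and a trivial non-atomic functor stands in for the platform layer. It shows the integral case (identical types) and the pointer case (implicit conversion) both routing through a size-keyed PlatformXchg:

#include <cstddef>
#include <cstdio>
#include <type_traits>

template<size_t byte_size>
struct PlatformXchg {
  template<typename T>
  T operator()(T exchange_value, T volatile* dest) const {
    T old = *dest;          // non-atomic stand-in for the platform instruction
    *dest = exchange_value;
    return old;
  }
};

template<typename T, typename D, typename Enable = void>
struct XchgImpl;

// Integral case: value and destination types must be identical.
template<typename T>
struct XchgImpl<T, T, typename std::enable_if<std::is_integral<T>::value>::type> {
  T operator()(T exchange_value, T volatile* dest) const {
    return PlatformXchg<sizeof(T)>()(exchange_value, dest);
  }
};

// Pointer case: exchange_value must be implicitly convertible to D*.
template<typename T, typename D>
struct XchgImpl<T*, D*, typename std::enable_if<std::is_convertible<T*, D*>::value>::type> {
  D* operator()(T* exchange_value, D* volatile* dest) const {
    D* new_value = exchange_value;  // derived-to-base / cv-qualification
    return PlatformXchg<sizeof(D*)>()(new_value, dest);
  }
};

template<typename T, typename D>
D xchg(T exchange_value, volatile D* dest) {
  return XchgImpl<T, D>()(exchange_value, dest);
}

struct Base {};
struct Derived : Base {};

int main() {
  volatile int v = 1;
  std::printf("old=%d\n", xchg(2, &v));          // integral dispatch

  Derived d;
  Base* volatile p = 0;
  xchg(&d, &p);                                  // pointer dispatch, Derived* -> Base*
  std::printf("p set: %s\n", p != 0 ? "yes" : "no");
  return 0;
}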
@ -592,8 +592,8 @@ public:
          range(8, 256) \
          constraint(ObjectAlignmentInBytesConstraintFunc,AtParse) \
          \
  product(bool, AssumeMP, false, \
          "Instruct the VM to assume multiple processors are available") \
  product(bool, AssumeMP, true, \
          "(Deprecated) Instruct the VM to assume multiple processors are available")\
          \
  /* UseMembar is theoretically a temp flag used for memory barrier */ \
  /* removal testing. It was supposed to be removed before FCS but has */ \
@ -213,7 +213,7 @@ class os: AllStatic {
    // the bootstrap routine for the stub generator needs to check
    // the processor count directly and leave the bootstrap routine
    // in place until called after initialization has occurred.
    return (_processor_count != 1) || AssumeMP;
    return AssumeMP || (_processor_count != 1);
  }
  static julong available_memory();
  static julong physical_memory();
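With AssumeMP now defaulting to true, evaluating the flag first plausibly lets the common case short-circuit before _processor_count is consulted at all, which matters during early bootstrap when the count may not yet be initialized (per the comment above). A trivial standalone sketch:

#include <cstdio>

static bool AssumeMP = true;        // new default
static int  _processor_count = 0;   // may be uninitialized during bootstrap

static bool is_MP() {
  // Flag first: when AssumeMP is set, the count is never read.
  return AssumeMP || (_processor_count != 1);
}

int main() {
  std::printf("is_MP=%d\n", (int)is_MP());
  return 0;
}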
@ -2726,8 +2726,12 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
  /* JVMCI */ \
  /****************/ \
  \
  declare_preprocessor_constant("INCLUDE_JVMCI", INCLUDE_JVMCI)
  declare_preprocessor_constant("INCLUDE_JVMCI", INCLUDE_JVMCI) \
  \
  /****************/ \
  /* VMRegImpl */ \
  /****************/ \
  declare_constant(VMRegImpl::stack_slot_size)

//--------------------------------------------------------------------------------
// VM_LONG_CONSTANTS
@ -1,4 +0,0 @@
BOOT
@@BOOT_MODULE_NAMES@@
PLATFORM
@@PLATFORM_MODULE_NAMES@@
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@ public class VMRegImpl {
  private static int stack0Val;
  private static Address stack0Addr;
  private static AddressField regNameField;
  private static int stackSlotSize;

  static {
    VM.registerVMInitializedObserver(new Observer() {
@ -53,6 +54,7 @@ public class VMRegImpl {
    stack0Val = (int) stack0Addr.hashCode();
    stack0 = new VMReg(stack0Val);
    regNameField = type.getAddressField("regName[0]");
    stackSlotSize = db.lookupIntConstant("VMRegImpl::stack_slot_size");
  }

  public static VMReg getStack0() {
@ -67,4 +69,8 @@ public class VMRegImpl {
    long addrSize = VM.getVM().getAddressSize();
    return CStringUtilities.getString(regName.getAddressAt(index * addrSize));
  }

  public static int getStackSlotSize() {
    return stackSlotSize;
  }
}
@ -269,13 +269,12 @@ public class ConstantPool extends Metadata implements ClassConstants {

  public static int decodeInvokedynamicIndex(int i) { Assert.that(isInvokedynamicIndex(i), ""); return ~i; }

  // The invokedynamic points at the object index. The object map points at
  // the cpCache index and the cpCache entry points at the original constant
  // pool index.
  // The invokedynamic points at a CP cache entry. This entry points back
  // at the original CP entry (CONSTANT_InvokeDynamic) and also (via f2) at an entry
  // in the resolved_references array (which provides the appendix argument).
  public int invokedynamicCPCacheIndex(int index) {
    Assert.that(isInvokedynamicIndex(index), "should be a invokedynamic index");
    int rawIndex = decodeInvokedynamicIndex(index);
    return referenceMap().at(rawIndex);
    return decodeInvokedynamicIndex(index);
  }

  ConstantPoolCacheEntry invokedynamicCPCacheEntryAt(int index) {
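The encoding behind decodeInvokedynamicIndex: invokedynamic operands are stored as the bitwise complement of the CP cache index, so decoding is a second complement. A standalone C++ sketch of the round trip — the encode helper and the negative-value test are assumptions of this sketch, mirroring the one-liner shown above:

#include <cstdio>

static bool is_invokedynamic_index(int i)     { return i < 0; }
static int  encode_invokedynamic_index(int i) { return ~i; }
static int  decode_invokedynamic_index(int i) { return ~i; }

int main() {
  int cp_cache_index = 5;
  int encoded = encode_invokedynamic_index(cp_cache_index);    // ~5 == -6
  std::printf("encoded=%d is_indy=%d decoded=%d\n",
              encoded,
              (int)is_invokedynamic_index(encoded),
              decode_invokedynamic_index(encoded));            // 5 again
  return 0;
}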
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -430,7 +430,7 @@ public abstract class Frame implements Cloneable {
      // If it is passed in a register, it got spilled in the stub frame.
      return regMap.getLocation(reg);
    } else {
      long spOffset = VM.getVM().getAddressSize() * reg.minus(stack0);
      long spOffset = reg.reg2Stack() * VM.getVM().getVMRegImplInfo().getStackSlotSize();
      return getUnextendedSP().addOffsetTo(spOffset);
    }
  }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -84,4 +84,8 @@ public class VMReg {
  public boolean greaterThanOrEqual(VMReg arg) { return value >= arg.value; }

  public int minus(VMReg arg) { return value - arg.value; }

  public int reg2Stack() {
    return value - VM.getVM().getVMRegImplInfo().getStack0().getValue();
  }
}
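The Frame.java change above is the payoff of the new reg2Stack()/getStackSlotSize() plumbing: a stack-resident VMReg sits some number of slots above stack0, and each slot is VMRegImpl::stack_slot_size bytes (4 in HotSpot), not address-size bytes, so on a 64-bit VM the old address-size multiplication overstated the offset. A numeric sketch with illustrative values:

#include <cstdio>

int main() {
  const long address_size    = 8;  // 64-bit VM
  const long stack_slot_size = 4;  // VMRegImpl::stack_slot_size
  const long slots_above_sp  = 3;  // reg.reg2Stack(), illustrative

  std::printf("old offset: %ld bytes\n", address_size * slots_above_sp);     // 24
  std::printf("new offset: %ld bytes\n", stack_slot_size * slots_above_sp);  // 12
  return 0;
}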
@ -64,6 +64,7 @@ gc/g1/logging/TestG1LoggingFailure.java 8169634 generic-all
gc/g1/humongousObjects/TestHeapCounters.java 8178918 generic-all
gc/stress/gclocker/TestGCLockerWithG1.java 8179226 generic-all
gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 8177765 generic-all
gc/logging/TestPrintReferences.java 8188245 generic-all

#############################################################################
@ -52,7 +52,8 @@ requires.properties= \
    vm.rtm.cpu \
    vm.rtm.os \
    vm.aot \
    vm.cds
    vm.cds \
    vm.graal.enabled

# Minimum jtreg version
requiredVersion=4.2 b08
@ -25,7 +25,7 @@
 * @test
 * @bug 8072016
 * @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
 * @requires vm.flavor == "server" & !vm.emulatedClient
 * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
 * @library /test/lib /
 * @modules java.base/jdk.internal.misc
 *          java.management
@ -26,6 +26,7 @@
 * @bug 8004741
 * @summary Missing compiled exception handle table entry for multidimensional array allocation
 *
 * @requires !vm.graal.enabled
 * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
 *                   -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers
 *                   -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100
@ -27,7 +27,7 @@
 * @summary Tests jcmd to be able to add a directive to compile only specified methods
 * @modules java.base/jdk.internal.misc
 * @library /test/lib /
 * @requires vm.flavor != "minimal"
 * @requires vm.flavor != "minimal" & !vm.graal.enabled
 *
 * @build sun.hotspot.WhiteBox
 * @run driver ClassFileInstaller sun.hotspot.WhiteBox
@ -25,6 +25,8 @@
 * @test
 * @bug 8137167
 * @summary Tests LogCompilation executed standalone without log commands or directives
 *
 * @requires !vm.graal.enabled
 * @modules java.base/jdk.internal.misc
 * @library /test/lib /
 *
@ -26,7 +26,7 @@
 * @library /test/lib /
 * @modules java.base/jdk.internal.misc
 *          java.management
 * @requires vm.cpu.features ~= ".*aes.*"
 * @requires vm.cpu.features ~= ".*aes.*" & !vm.graal.enabled
 * @build sun.hotspot.WhiteBox
 * @run driver ClassFileInstaller sun.hotspot.WhiteBox
 *                                sun.hotspot.WhiteBox$WhiteBoxPermission
@ -24,6 +24,8 @@
/*
 * @test
 * @bug 8138651
 *
 * @requires !vm.graal.enabled
 * @modules java.base/jdk.internal.misc
 * @library /test/lib /
 *
@ -25,7 +25,7 @@
 * @test NullCheckDroppingsTest
 * @bug 8054492
 * @summary Casting can result in redundant null checks in generated code
 * @requires vm.flavor == "server" & !vm.emulatedClient
 * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
 * @library /test/lib
 * @modules java.base/jdk.internal.misc
 *          java.management
@ -42,7 +42,8 @@ public class GenericTestCaseForOtherCPU extends
                new OrPredicate(Platform::isAArch64,
                new OrPredicate(Platform::isS390x,
                new OrPredicate(Platform::isSparc,
                new OrPredicate(Platform::isX64, Platform::isX86))))));
                new OrPredicate(Platform::isPPC,
                new OrPredicate(Platform::isX64, Platform::isX86)))))));
    }

    @Override
@ -28,7 +28,7 @@
 * @summary Test that C2 flag UseCountedLoopSafepoints ensures a safepoint is kept in a CountedLoop
 * @library /test/lib /
 * @requires vm.compMode != "Xint" & vm.flavor == "server" & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4) & vm.debug == true
 * @requires !vm.emulatedClient
 * @requires !vm.emulatedClient & !vm.graal.enabled
 * @modules java.base/jdk.internal.misc
 * @build sun.hotspot.WhiteBox
 * @run driver ClassFileInstaller sun.hotspot.WhiteBox
@ -71,23 +71,27 @@ public class IntrinsicPredicates {
            = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" }, null),
              new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha256" }, null),
              new OrPredicate(new CPUSpecificPredicate("sparc.*", new String[] { "sha256" }, null),
              new OrPredicate(new CPUSpecificPredicate("ppc64.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha" }, null),
              // x86 variants
              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null),
              new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))));
              new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))));

    public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
            = new OrPredicate(new CPUSpecificPredicate("aarch64.*", new String[] { "sha512" }, null),
              new OrPredicate(new CPUSpecificPredicate("s390.*", new String[] { "sha512" }, null),
              new OrPredicate(new CPUSpecificPredicate("sparc.*", new String[] { "sha512" }, null),
              new OrPredicate(new CPUSpecificPredicate("ppc64.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("ppc64le.*", new String[] { "sha" }, null),
              // x86 variants
              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" }, null),
              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null),
              new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))));
              new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null))))))))));

    public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE
            = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
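The extra closing parentheses on the last lines are easy to miss: each added ppc64/ppc64le predicate opens another OrPredicate, so the terminating run of ')' grows by two. A tiny standalone analogue of the combinator (C++ stand-in for the Java OrPredicate; the lambdas are invented):

#include <cstdio>
#include <functional>

// Analogue of the test library's OrPredicate: true when either side is true.
struct OrPredicate {
  std::function<bool()> a, b;
  bool operator()() const { return a() || b(); }
};

int main() {
  auto isPPC64 = [] { return false; };
  auto isX64   = [] { return true;  };
  auto isX86   = [] { return false; };
  // Nested exactly like the Java code: each new CPU adds one more level.
  OrPredicate any{ isPPC64, OrPredicate{ isX64, isX86 } };
  std::printf("any matching CPU: %d\n", (int)any());
  return 0;
}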
@ -46,6 +46,7 @@ public class VMDeprecatedOptions {
        {"MaxRAMFraction", "8"},
        {"MinRAMFraction", "2"},
        {"InitialRAMFraction", "64"},
        {"AssumeMP", "false"},

        // deprecated alias flags (see also aliased_jvm_flags):
        {"DefaultMaxRAMFraction", "4"},
@ -64,7 +64,7 @@ public class SpaceUtilizationCheck {
    static void test(String... extra_options) throws Exception {
        OutputAnalyzer output = CDSTestUtils.createArchive(extra_options);
        CDSTestUtils.checkDump(output);
        Pattern pattern = Pattern.compile("^(..) space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a0-f]+)");
        Pattern pattern = Pattern.compile("^(..) *space: *([0-9]+).* out of *([0-9]+) bytes .* at 0x([0-9a0-f]+)");
        WhiteBox wb = WhiteBox.getWhiteBox();
        long reserve_alignment = wb.metaspaceReserveAlignment();
        System.out.println("Metaspace::reserve_alignment() = " + reserve_alignment);
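The regex gains " *" because of the "%-3s space" format change earlier in this commit: two-letter region names are now padded to three columns, so "ro space:" becomes "ro  space:". A standalone check:

#include <cstdio>
#include <regex>
#include <string>

int main() {
  const std::regex old_pat("^(..) space:");
  const std::regex new_pat("^(..) *space:");
  const std::string line = "ro  space:   4096 ...";  // padded by %-3s

  std::printf("old matches: %d\n", (int)std::regex_search(line, old_pat));  // 0
  std::printf("new matches: %d\n", (int)std::regex_search(line, new_pat));  // 1
  return 0;
}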
@ -50,7 +50,7 @@ public class PatchModuleCDS {
            "-Xlog:class+path=info",
            "-version");
        new OutputAnalyzer(pb.start())
            .shouldContain("ro space:"); // Make sure archive got created.
            .shouldContain("ro  space:"); // Make sure archive got created.

        // Case 2: Test that directory in --patch-module is supported for CDS dumping
        // Create a class file in the module java.base.
@ -73,7 +73,7 @@ public class PatchModuleCDS {
            "-Xlog:class+path=info",
            "-version");
        new OutputAnalyzer(pb.start())
            .shouldContain("ro space:"); // Make sure archive got created.
            .shouldContain("ro  space:"); // Make sure archive got created.

        // Case 3a: Test CDS dumping with jar file in --patch-module
        BasicJarBuilder.build("javanaming", "javax/naming/spi/NamingManager");
@ -87,7 +87,7 @@ public class PatchModuleCDS {
            "-Xlog:class+path=info",
            "PatchModuleMain", "javax.naming.spi.NamingManager");
        new OutputAnalyzer(pb.start())
            .shouldContain("ro space:"); // Make sure archive got created.
            .shouldContain("ro  space:"); // Make sure archive got created.

        // Case 3b: Test CDS run with jar file in --patch-module
        pb = ProcessTools.createJavaProcessBuilder(
@ -0,0 +1,105 @@
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

import java.util.ArrayList;
import java.util.List;

import sun.jvm.hotspot.HotSpotAgent;
import sun.jvm.hotspot.utilities.ReversePtrsAnalysis;

import jdk.test.lib.apps.LingeredApp;
import jdk.test.lib.Asserts;
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.JDKToolLauncher;
import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.Utils;

/*
 * @test
 * @library /test/lib
 * @requires os.family != "mac"
 * @modules java.base/jdk.internal.misc
 *          jdk.hotspot.agent/sun.jvm.hotspot
 *          jdk.hotspot.agent/sun.jvm.hotspot.utilities
 * @run main/othervm TestRevPtrsForInvokeDynamic
 */

public class TestRevPtrsForInvokeDynamic {

    private static LingeredAppWithInvokeDynamic theApp = null;

    private static void computeReversePointers(String pid) throws Exception {
        HotSpotAgent agent = new HotSpotAgent();

        try {
            agent.attach(Integer.parseInt(pid));
            ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
            analysis.run();
        } finally {
            agent.detach();
        }
    }

    private static void createAnotherToAttach(long lingeredAppPid)
            throws Exception {
        String[] toolArgs = {
            "--add-modules=jdk.hotspot.agent",
            "--add-exports=jdk.hotspot.agent/sun.jvm.hotspot=ALL-UNNAMED",
            "--add-exports=jdk.hotspot.agent/sun.jvm.hotspot.utilities=ALL-UNNAMED",
            "TestRevPtrsForInvokeDynamic",
            Long.toString(lingeredAppPid)
        };

        // Start a new process to attach to the lingered app
        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(toolArgs);
        OutputAnalyzer SAOutput = ProcessTools.executeProcess(processBuilder);
        SAOutput.shouldHaveExitValue(0);
        System.out.println(SAOutput.getOutput());
    }

    public static void main (String... args) throws Exception {
        if (!Platform.shouldSAAttach()) {
            System.out.println(
                "SA attach not expected to work - test skipped.");
            return;
        }

        if (args == null || args.length == 0) {
            try {
                List<String> vmArgs = new ArrayList<String>();
                vmArgs.add("-XX:+UsePerfData");
                vmArgs.addAll(Utils.getVmOptions());

                theApp = new LingeredAppWithInvokeDynamic();
                LingeredApp.startApp(vmArgs, theApp);
                createAnotherToAttach(theApp.getPid());
            } finally {
                LingeredApp.stopApp(theApp);
            }
        } else {
            computeReversePointers(args[0]);
        }
    }
}
@ -73,6 +73,8 @@ public class VMProps implements Callable<Map<String, String>> {
        map.put("vm.aot", vmAOT());
        // vm.cds is true if the VM is compiled with cds support.
        map.put("vm.cds", vmCDS());
        // vm.graal.enabled is true if Graal is used as JIT
        map.put("vm.graal.enabled", isGraalEnabled());
        vmGC(map); // vm.gc.X = true/false

        VMProps.dump(map);
@ -292,6 +294,41 @@ public class VMProps implements Callable<Map<String, String>> {
        }
    }

    /**
     * Check if Graal is used as JIT compiler.
     *
     * @return true if Graal is used as JIT compiler.
     */
    protected String isGraalEnabled() {
        // Graal is enabled if following conditions are true:
        // - we are not in Interpreter mode
        // - UseJVMCICompiler flag is true
        // - jvmci.Compiler variable is equal to 'graal'
        // - TieredCompilation is not used or TieredStopAtLevel is greater than 3

        Boolean useCompiler = WB.getBooleanVMFlag("UseCompiler");
        if (useCompiler == null || !useCompiler)
            return "false";

        Boolean useJvmciComp = WB.getBooleanVMFlag("UseJVMCICompiler");
        if (useJvmciComp == null || !useJvmciComp)
            return "false";

        // This check might be redundant but let's keep it for now.
        String jvmciCompiler = System.getProperty("jvmci.Compiler");
        if (jvmciCompiler == null || !jvmciCompiler.equals("graal")) {
            return "false";
        }

        Boolean tieredCompilation = WB.getBooleanVMFlag("TieredCompilation");
        Long compLevel = WB.getIntxVMFlag("TieredStopAtLevel");
        // if TieredCompilation is enabled and compilation level is <= 3 then no Graal is used
        if (tieredCompilation != null && tieredCompilation && compLevel != null && compLevel <= 3)
            return "false";

        return "true";
    }
|
||||
* Dumps the map to the file if the file name is given as the property.
|
||||
* This functionality could be helpful to know context in the real
|
||||
|