Merge
commit f7c235467c

@@ -216,3 +216,4 @@ cb51fb4789ac0b8be4056482077ddfb8f3bd3805 jdk8-b91
3a36c926a7aafa9d4a892a45ef3678e87ad8359b jdk8-b92
27c51c6e31c1ef36afa0e6efb031f9b13f26c12b jdk8-b93
50d2bde060f2a9bbbe4da0c8986e20aca61f2e2e jdk8-b94
785d07fe38901ecc1b7e0145e53e1c3da9361fee jdk8-b95

@@ -355,12 +355,24 @@
</li>
<li>
Install a
<a name="bootjdk">Bootstrap JDK</a>
<br>
<a name="bootjdk">Bootstrap JDK</a>.
All OpenJDK builds require access to a previously released
JDK, this is often called a bootstrap JDK.
Currently, for this JDK release we require
JDK 7 Update 7 or newer.
JDK called the <i>bootstrap JDK</i> or <i>boot JDK.</i>
The general rule is that the bootstrap JDK
must be an instance of the previous major
release of the JDK. In addition, there may be
a requirement to use a release at or beyond a
particular update level.
<br> <br>

<b><i>Building JDK 8 requires use of a version
of JDK 7 that is at Update 7 or newer. JDK 8
developers should not use JDK 8 as the boot
JDK, to ensure that JDK 8 dependencies are
not introduced into the parts of the system
that are built with JDK 7.</i></b>

<br> <br>
The JDK 7 binaries can be downloaded from Oracle's
<a href="http://www.oracle.com/technetwork/java/javase/downloads/index.html"
target="_blank">JDK 7 download site</a>.

@@ -183,7 +183,7 @@ bootcycle-images-only: start-make
test: images test-only
test-only: start-make
@$(call TargetEnter)
@($(CD) $(SRC_ROOT)/test && $(BUILD_LOG_WRAPPER) $(MAKE) -j1 -k JT_HOME=$(JT_HOME) MAKEFLAGS= PRODUCT_HOME=$(JDK_IMAGE_DIR) JPRT_JAVA_HOME=$(JDK_IMAGE_DIR) ALT_OUTPUTDIR=$(OUTPUT_ROOT) $(TEST)) || true
@($(CD) $(SRC_ROOT)/test && $(BUILD_LOG_WRAPPER) $(MAKE) -j1 -k MAKEFLAGS= JT_HOME=$(JT_HOME) PRODUCT_HOME=$(JDK_IMAGE_DIR) JPRT_JAVA_HOME=$(JDK_IMAGE_DIR) ALT_OUTPUTDIR=$(OUTPUT_ROOT) CONCURRENCY=$(JOBS) $(TEST)) || true
@$(call TargetExit)

# Stores the tips for each repository. This file is be used when constructing the jdk image and can be
@@ -192,7 +192,7 @@ source-tips: $(OUTPUT_ROOT)/source_tips
$(OUTPUT_ROOT)/source_tips: FRC
@$(MKDIR) -p $(@D)
@$(RM) $@
@$(if $(HG),$(call GetSourceTips),$(ECHO) "hg not installed" > $@)
@$(call GetSourceTips)


# Remove everything, except the output from configure.

@@ -351,3 +351,5 @@ b786c04b7be15194febe88dc1f0c9443e737a84b hs25-b35
3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36
1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37
5d65c078cd0ac455aa5e58a09844c7acce54b487 jdk8-b95
2cc5a9d1ba66dfdff578918b393c727bd9450210 hs25-b38

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=37
HS_BUILD_NUMBER=38

JDK_MAJOR_VER=1
JDK_MINOR_VER=8

@@ -214,7 +214,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
endif

WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value

ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit

@@ -57,7 +57,6 @@ class Assembler : public AbstractAssembler {
fbp_op2 = 5,
br_op2 = 2,
bp_op2 = 1,
cb_op2 = 7, // V8
sethi_op2 = 4
};

@@ -145,7 +144,6 @@ class Assembler : public AbstractAssembler {
ldsh_op3 = 0x0a,
ldx_op3 = 0x0b,

ldstub_op3 = 0x0d,
stx_op3 = 0x0e,
swap_op3 = 0x0f,

@@ -163,15 +161,6 @@ class Assembler : public AbstractAssembler {

prefetch_op3 = 0x2d,


ldc_op3 = 0x30,
ldcsr_op3 = 0x31,
lddc_op3 = 0x33,
stc_op3 = 0x34,
stcsr_op3 = 0x35,
stdcq_op3 = 0x36,
stdc_op3 = 0x37,

casa_op3 = 0x3c,
casxa_op3 = 0x3e,

@@ -574,17 +563,11 @@ class Assembler : public AbstractAssembler {
static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }

// instruction only in v9
static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }

// instruction only in v8
static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
static void v9_only() { } // do nothing

// instruction deprecated in v9
static void v9_dep() { } // do nothing for now

// some float instructions only exist for single prec. on v8
static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); }

// v8 has no CC field
static void v8_no_cc(CC cc) { if (cc) v9_only(); }

@@ -730,11 +713,6 @@ public:
inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );

// pp 121 (V8)

inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
inline void cb( Condition c, bool a, Label& L );

// pp 149

inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@ public:

// pp 157

void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }

// pp 159

@@ -794,21 +772,11 @@ public:

// pp 162

void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }

void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }

// page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
// on v8 to do negation of single, double and quad precision floats.

void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); }

void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }

// page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
// on v8 to do abs operation on single/double/quad precision floats.

void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }

// pp 163

@@ -839,11 +807,6 @@ public:
void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }

// pp 149 (v8)

void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }

// pp 170

void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@ public:
inline void ldxfsr( Register s1, Register s2 );
inline void ldxfsr( Register s1, int simm13a);

// pp 94 (v8)

inline void ldc( Register s1, Register s2, int crd );
inline void ldc( Register s1, int simm13a, int crd);
inline void lddc( Register s1, Register s2, int crd );
inline void lddc( Register s1, int simm13a, int crd);
inline void ldcsr( Register s1, Register s2, int crd );
inline void ldcsr( Register s1, int simm13a, int crd);


// 173

void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@ public:
void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 179

inline void ldstub( Register s1, Register s2, Register d );
inline void ldstub( Register s1, int simm13a, Register d);

// pp 180

void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void ldstuba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 181

@@ -992,11 +933,6 @@ public:
void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 199

void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 201

void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@ public:
void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
void stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 97 (v8)

inline void stc( int crd, Register s1, Register s2 );
inline void stc( int crd, Register s1, int simm13a);
inline void stdc( int crd, Register s1, Register s2 );
inline void stdc( int crd, Register s1, int simm13a);
inline void stcsr( int crd, Register s1, Register s2 );
inline void stcsr( int crd, Register s1, int simm13a);
inline void stdcq( int crd, Register s1, Register s2 );
inline void stdcq( int crd, Register s1, int simm13a);

// pp 230

void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@ public:

void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); }
void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 235

void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); }
void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// pp 237

void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
void trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
// simple uncond. trap
void trap( int trapa ) { trap( always, icc, G0, trapa ); }

@@ -63,9 +63,6 @@ inline void Assembler::fb( Condition c, bool a, Label& L ) { fb(c, a, target(L))
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); }
inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }

inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }

inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); }
inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }

@@ -88,18 +85,9 @@ inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHol
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }

inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

@@ -119,9 +107,6 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }

@@ -132,8 +117,6 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

@@ -152,17 +135,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// v8 p 99

inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

// pp 231

inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }

@@ -597,13 +597,6 @@ void LIR_Assembler::emit_op3(LIR_Op3* op) {

__ sra(Rdividend, 31, Rscratch);
__ wry(Rscratch);
if (!VM_Version::v9_instructions_work()) {
// v9 doesn't require these nops
__ nop();
__ nop();
__ nop();
__ nop();
}

add_debug_info_for_div0_here(op->info());

@@ -652,10 +645,6 @@ void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
default : ShouldNotReachHere();
};

if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb( acond, false, Assembler::pn, *(op->label()));
} else {
@@ -725,9 +714,6 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
Label L;
// result must be 0 if value is NaN; test by comparing value to itself
__ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb(Assembler::f_unordered, true, Assembler::pn, L);
__ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
__ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, rreg, res); break;
case lir_sub: __ sub (lreg, rreg, res); break;
case lir_mul: __ mult (lreg, rreg, res); break;
case lir_mul: __ mulx (lreg, rreg, res); break;
default: ShouldNotReachHere();
}
}
@@ -1924,7 +1910,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, simm13, res); break;
case lir_sub: __ sub (lreg, simm13, res); break;
case lir_mul: __ mult (lreg, simm13, res); break;
case lir_mul: __ mulx (lreg, simm13, res); break;
default: ShouldNotReachHere();
}
} else {
@@ -1936,7 +1922,7 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
switch (code) {
case lir_add: __ add (lreg, (int)con, res); break;
case lir_sub: __ sub (lreg, (int)con, res); break;
case lir_mul: __ mult (lreg, (int)con, res); break;
case lir_mul: __ mulx (lreg, (int)con, res); break;
default: ShouldNotReachHere();
}
}
@@ -3234,48 +3220,26 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
Register base = mem_addr->base()->as_register();
if (src->is_register() && dest->is_address()) {
// G4 is high half, G5 is low half
if (VM_Version::v9_instructions_work()) {
// clear the top bits of G5, and scale up G4
__ srl (src->as_register_lo(), 0, G5);
__ sllx(src->as_register_hi(), 32, G4);
// combine the two halves into the 64 bits of G4
__ or3(G4, G5, G4);
null_check_offset = __ offset();
if (idx == noreg) {
__ stx(G4, base, disp);
} else {
__ stx(G4, base, idx);
}
// clear the top bits of G5, and scale up G4
__ srl (src->as_register_lo(), 0, G5);
__ sllx(src->as_register_hi(), 32, G4);
// combine the two halves into the 64 bits of G4
__ or3(G4, G5, G4);
null_check_offset = __ offset();
if (idx == noreg) {
__ stx(G4, base, disp);
} else {
__ mov (src->as_register_hi(), G4);
__ mov (src->as_register_lo(), G5);
null_check_offset = __ offset();
if (idx == noreg) {
__ std(G4, base, disp);
} else {
__ std(G4, base, idx);
}
__ stx(G4, base, idx);
}
} else if (src->is_address() && dest->is_register()) {
null_check_offset = __ offset();
if (VM_Version::v9_instructions_work()) {
if (idx == noreg) {
__ ldx(base, disp, G5);
} else {
__ ldx(base, idx, G5);
}
__ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
__ mov (G5, dest->as_register_lo()); // copy low half into lo
if (idx == noreg) {
__ ldx(base, disp, G5);
} else {
if (idx == noreg) {
__ ldd(base, disp, G4);
} else {
__ ldd(base, idx, G4);
}
// G4 is high half, G5 is low half
__ mov (G4, dest->as_register_hi());
__ mov (G5, dest->as_register_lo());
__ ldx(base, idx, G5);
}
__ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
__ mov (G5, dest->as_register_lo()); // copy low half into lo
} else {
Unimplemented();
}

@@ -108,7 +108,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox

// compare object markOop with Rmark and if equal exchange Rscratch with object markOop
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb

// Check if it is still a light weight lock, this is is true if we see
// the stack address of the basicLock in the markOop of the object
casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rbox, Rmark);
cmp(Rbox, Rmark);

brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(
sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
initialize_body(t1, t2);
#ifndef _LP64
} else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
} else if (con_size_in_bytes < threshold * 2) {
// on v9 we can do double word stores to fill twice as much space.
assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
assert(con_size_in_bytes % 8 == 0, "double word aligned");

@@ -30,5 +30,4 @@

void Compile::pd_compiler2_init() {
guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
}

@@ -30,8 +30,7 @@
}

static const char* pd_cpu_opts() {
return (VM_Version::v9_instructions_work()?
(VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
return "v9only";
}

#endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP

@@ -110,8 +110,5 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
\
product(uintx, ArraycopyDstPrefetchDistance, 0, \
"Distance to prefetch destination array in arracopy") \
\
develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \
"Number of times to spin wait on a v8 atomic operation lock") \

#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP

@@ -1210,8 +1210,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object)
st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
// compare and exchange object_addr, markOop | 1, stack address of basicLock
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), mark_reg, temp_reg);

// if the compare and exchange succeeded we are done (we saw an unlocked object)
cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// we expect to see the stack address of the basicLock in case the
// lock is still a light weight lock (lock_reg)
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
cmp(lock_reg, displaced_header_reg);
brx(Assembler::equal, true, Assembler::pn, done);
delayed()->st_ptr(G0, lockobj_addr); // free entry

@@ -118,7 +118,6 @@ int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break;
case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break;
case bpr_op2: {
if (is_cbcond(inst)) {
m = wdisp10(word_aligned_ones, 0);
@@ -149,7 +148,6 @@ int MacroAssembler::branch_destination(int inst, int pos) {
case bp_op2: r = inv_wdisp( inst, pos, 19); break;
case fb_op2: r = inv_wdisp( inst, pos, 22); break;
case br_op2: r = inv_wdisp( inst, pos, 22); break;
case cb_op2: r = inv_wdisp( inst, pos, 22); break;
case bpr_op2: {
if (is_cbcond(inst)) {
r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@ void MacroAssembler::breakpoint_trap() {
trap(ST_RESERVED_FOR_USER_0);
}

// flush windows (except current) using flushw instruction if avail.
void MacroAssembler::flush_windows() {
if (VM_Version::v9_instructions_work()) flushw();
else flush_windows_trap();
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
@@ -358,88 +350,6 @@ void MacroAssembler::leave() {
Unimplemented();
}

void MacroAssembler::mult(Register s1, Register s2, Register d) {
if(VM_Version::v9_instructions_work()) {
mulx (s1, s2, d);
} else {
smul (s1, s2, d);
}
}

void MacroAssembler::mult(Register s1, int simm13a, Register d) {
if(VM_Version::v9_instructions_work()) {
mulx (s1, simm13a, d);
} else {
smul (s1, simm13a, d);
}
}


#ifdef ASSERT
void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
const Register s1 = G3_scratch;
const Register s2 = G4_scratch;
Label get_psr_test;
// Get the condition codes the V8 way.
read_ccr_trap(s1);
mov(ccr_save, s2);
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
and3(s2, 0xf, s2);
// Compare condition codes from the V8 and V9 ways.
subcc(s2, s1, G0);
br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
delayed()->breakpoint_trap();
bind(get_psr_test);
}

void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
const Register s1 = G3_scratch;
const Register s2 = G4_scratch;
Label set_psr_test;
// Write out the saved condition codes the V8 way
write_ccr_trap(ccr_save, s1, s2);
// Read back the condition codes using the V9 instruction
rdccr(s1);
mov(ccr_save, s2);
// This is a test of V8 which has icc but not xcc
// so mask off the xcc bits
and3(s2, 0xf, s2);
and3(s1, 0xf, s1);
// Compare the V8 way with the V9 way.
subcc(s2, s1, G0);
br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
delayed()->breakpoint_trap();
bind(set_psr_test);
}
#else
#define read_ccr_v8_assert(x)
#define write_ccr_v8_assert(x)
#endif // ASSERT

void MacroAssembler::read_ccr(Register ccr_save) {
if (VM_Version::v9_instructions_work()) {
rdccr(ccr_save);
// Test code sequence used on V8. Do not move above rdccr.
read_ccr_v8_assert(ccr_save);
} else {
read_ccr_trap(ccr_save);
}
}

void MacroAssembler::write_ccr(Register ccr_save) {
if (VM_Version::v9_instructions_work()) {
// Test code sequence used on V8. Do not move below wrccr.
write_ccr_v8_assert(ccr_save);
wrccr(ccr_save);
} else {
const Register temp_reg1 = G3_scratch;
const Register temp_reg2 = G4_scratch;
write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
}
}


// Calls to C land

#ifdef ASSERT
@@ -465,8 +375,8 @@ void MacroAssembler::get_thread() {
#ifdef ASSERT
AddressLiteral last_get_thread_addrlit(&last_get_thread);
set(last_get_thread_addrlit, L3);
inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
st_ptr(L4, L3, 0);
rdpc(L4);
inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call st_ptr(L4, L3, 0);
#endif
call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
delayed()->nop();
@@ -1327,7 +1237,7 @@ void RegistersForDebugging::print(outputStream* s) {

void RegistersForDebugging::save_registers(MacroAssembler* a) {
a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
a->flush_windows();
a->flushw();
int i;
for (i = 0; i < 8; ++i) {
a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1248,7 @@ void RegistersForDebugging::save_registers(MacroAssembler* a) {
for (i = 0; i < 32; ++i) {
a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
}
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
}
}
@@ -1350,7 +1260,7 @@ void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
for (int j = 0; j < 32; ++j) {
a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
}
for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
for (int k = 0; k < 64; k += 2) {
a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
}
}
@@ -1465,8 +1375,6 @@ address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );

// Leaf call; no frame.
Label succeed, fail, null_or_fail;

@@ -1870,26 +1778,17 @@ void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
// And the equals case for the high part does not need testing,
// since that triplet is reached only after finding the high halves differ.

if (VM_Version::v9_instructions_work()) {
mov(-1, Rresult);
ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult);
} else {
br(less, true, pt, done); delayed()-> set(-1, Rresult);
br(greater, true, pt, done); delayed()-> set( 1, Rresult);
}
mov(-1, Rresult);
ba(done);
delayed()->movcc(greater, false, icc, 1, Rresult);

bind( check_low_parts );
bind(check_low_parts);

if (VM_Version::v9_instructions_work()) {
mov( -1, Rresult);
movcc(equal, false, icc, 0, Rresult);
movcc(greaterUnsigned, false, icc, 1, Rresult);
} else {
set(-1, Rresult);
br(equal, true, pt, done); delayed()->set( 0, Rresult);
br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
}
bind( done );
mov( -1, Rresult);
movcc(equal, false, icc, 0, Rresult);
movcc(greaterUnsigned, false, icc, 1, Rresult);

bind(done);
}

void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
@@ -2117,119 +2016,24 @@ void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in
void MacroAssembler::float_cmp( bool is_float, int unordered_result,
FloatRegister Fa, FloatRegister Fb,
Register Rresult) {

fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);

Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less;
Condition eq = f_equal;
Condition gt = unordered_result == 1 ? f_unorderedOrGreater : f_greater;

if (VM_Version::v9_instructions_work()) {

mov(-1, Rresult);
movcc(eq, true, fcc0, 0, Rresult);
movcc(gt, true, fcc0, 1, Rresult);

if (is_float) {
fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
} else {
Label done;
fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
}

set( -1, Rresult );
//fb(lt, true, pn, done); delayed()->set( -1, Rresult );
fb( eq, true, pn, done); delayed()->set( 0, Rresult );
fb( gt, true, pn, done); delayed()->set( 1, Rresult );

bind (done);
if (unordered_result == 1) {
mov( -1, Rresult);
movcc(f_equal, true, fcc0, 0, Rresult);
movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult);
} else {
mov( -1, Rresult);
movcc(f_equal, true, fcc0, 0, Rresult);
movcc(f_greater, true, fcc0, 1, Rresult);
}
}


void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fneg(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fneg(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");

Assembler::fneg(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");

// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");

Assembler::fneg(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}

void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fmov(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fmov(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");

Assembler::fmov(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");

// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");

Assembler::fmov(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}

void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
{
if (VM_Version::v9_instructions_work()) {
Assembler::fabs(w, s, d);
} else {
if (w == FloatRegisterImpl::S) {
Assembler::fabs(w, s, d);
} else if (w == FloatRegisterImpl::D) {
// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");

Assembler::fabs(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
} else {
assert(w == FloatRegisterImpl::Q, "Invalid float register width");

// number() does a sanity check on the alignment.
assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");

Assembler::fabs(FloatRegisterImpl::S, s, d);
Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
}
}
}

void MacroAssembler::save_all_globals_into_locals() {
mov(G1,L1);
mov(G2,L2);
@@ -2250,135 +2054,6 @@ void MacroAssembler::restore_globals_from_locals() {
mov(L7,G7);
}

// Use for 64 bit operation.
void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
#ifdef _LP64
casx(top_ptr_reg, top_reg, ptr_reg);
#else
cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
#endif // _LP64
}

// [RGV] This routine does not handle 64 bit operations.
// use casx_under_lock() or casx directly!!!
void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
{
// store ptr_reg as the new top value
if (VM_Version::v9_instructions_work()) {
cas(top_ptr_reg, top_reg, ptr_reg);
} else {

// If the register is not an out nor global, it is not visible
// after the save. Allocate a register for it, save its
// value in the register save area (the save may not flush
// registers to the save area).

Register top_ptr_reg_after_save;
Register top_reg_after_save;
Register ptr_reg_after_save;

if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
top_ptr_reg_after_save = top_ptr_reg->after_save();
} else {
Address reg_save_addr = top_ptr_reg->address_in_saved_window();
top_ptr_reg_after_save = L0;
st(top_ptr_reg, reg_save_addr);
}

if (top_reg->is_out() || top_reg->is_global()) {
top_reg_after_save = top_reg->after_save();
} else {
Address reg_save_addr = top_reg->address_in_saved_window();
top_reg_after_save = L1;
st(top_reg, reg_save_addr);
}

if (ptr_reg->is_out() || ptr_reg->is_global()) {
ptr_reg_after_save = ptr_reg->after_save();
} else {
Address reg_save_addr = ptr_reg->address_in_saved_window();
ptr_reg_after_save = L2;
st(ptr_reg, reg_save_addr);
}

const Register& lock_reg = L3;
const Register& lock_ptr_reg = L4;
const Register& value_reg = L5;
const Register& yield_reg = L6;
const Register& yieldall_reg = L7;

save_frame();

if (top_ptr_reg_after_save == L0) {
ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
}

if (top_reg_after_save == L1) {
ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
}

if (ptr_reg_after_save == L2) {
ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
}

Label(retry_get_lock);
Label(not_same);
Label(dont_yield);

assert(lock_addr, "lock_address should be non null for v8");
set((intptr_t)lock_addr, lock_ptr_reg);
// Initialize yield counter
mov(G0,yield_reg);
mov(G0, yieldall_reg);
set(StubRoutines::Sparc::locked, lock_reg);

bind(retry_get_lock);
cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);

if(use_call_vm) {
Untested("Need to verify global reg consistancy");
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
} else {
// Save the regs and make space for a C call
save(SP, -96, SP);
save_all_globals_into_locals();
call(CAST_FROM_FN_PTR(address,os::yield_all));
delayed()->mov(yieldall_reg, O0);
restore_globals_from_locals();
restore();
}

// reset the counter
mov(G0,yield_reg);
add(yieldall_reg, 1, yieldall_reg);

bind(dont_yield);
// try to get lock
Assembler::swap(lock_ptr_reg, 0, lock_reg);

// did we get the lock?
cmp(lock_reg, StubRoutines::Sparc::unlocked);
br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
delayed()->add(yield_reg,1,yield_reg);

// yes, got lock. do we have the same top?
ld(top_ptr_reg_after_save, 0, value_reg);
cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);

// yes, same top.
st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
membar(Assembler::StoreStore);

bind(not_same);
mov(value_reg, ptr_reg_after_save);
st(lock_reg, lock_ptr_reg, 0); // unlock

restore();
}
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
Register tmp,
int offset) {
@@ -2970,7 +2645,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2673,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2702,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
casn(mark_addr.base(), mark_reg, temp_reg);
cas_ptr(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
@@ -3058,15 +2733,6 @@ void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg,
}


// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
// Solaris/SPARC's "as". Another apt name would be cas_ptr()

void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
}



// compiler_lock_object() and compiler_unlock_object() are direct transliterations
// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
// The code could be tightened up considerably.
@@ -3129,8 +2795,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,

// compare object markOop with Rmark and if equal exchange Rscratch with object markOop
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rmark, Rscratch,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rmark, Rscratch);

// if compare/exchange succeeded we found an unlocked object and we now have locked it
// hence we are done
@@ -3176,7 +2841,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
mov(Rbox, Rscratch);
or3(Rmark, markOopDesc::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casn(mark_addr.base(), Rmark, Rscratch);
cas_ptr(mark_addr.base(), Rmark, Rscratch);
cmp(Rmark, Rscratch);
brx(Assembler::equal, false, Assembler::pt, done);
delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2872,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Invariant: if we acquire the lock then _recursions should be 0.
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
cmp(Rscratch, G0);
// Intentional fall-through into done
} else {
@@ -3240,7 +2905,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
mov(0, Rscratch);
or3(Rmark, markOopDesc::unlocked_value, Rmark);
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casn(mark_addr.base(), Rmark, Rscratch);
cas_ptr(mark_addr.base(), Rmark, Rscratch);
// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
cmp(Rscratch, Rmark);
brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2931,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// the fast-path stack-lock code from the interpreter and always passed
// control to the "slow" operators in synchronizer.cpp.

// RScratch contains the fetched obj->mark value from the failed CASN.
// RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
sub(Rscratch, STACK_BIAS, Rscratch);
#endif
@@ -3300,7 +2965,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
// Invariant: if we acquire the lock then _recursions should be 0.
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
cmp(Rscratch, G0);
// ST box->displaced_header = NonZero.
// Any non-zero value suffices:
@@ -3336,8 +3001,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// Check if it is still a light weight lock, this is is true if we see
// the stack address of the basicLock in the markOop of the object
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
casx_under_lock(mark_addr.base(), Rbox, Rmark,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
cas_ptr(mark_addr.base(), Rbox, Rmark);
ba(done);
delayed()->cmp(Rbox, Rmark);
bind(done);
@@ -3398,7 +3062,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
delayed()->andcc(G0, G0, G0);
add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
mov(G2_thread, Rscratch);
casn(Rmark, G0, Rscratch);
cas_ptr(Rmark, G0, Rscratch);
// invert icc.zf and goto done
br_notnull(Rscratch, false, Assembler::pt, done);
delayed()->cmp(G0, G0);
@@ -3440,7 +3104,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
// A prototype implementation showed excellent results, although
// the scavenger and timeout code was rather involved.

casn(mark_addr.base(), Rbox, Rscratch);
cas_ptr(mark_addr.base(), Rbox, Rscratch);
cmp(Rbox, Rscratch);
// Intentional fall through into done ...

@ -3540,7 +3204,8 @@ void MacroAssembler::eden_allocate(
|
||||
|
||||
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
|
||||
// No allocation in the shared eden.
|
||||
ba_short(slow_case);
|
||||
ba(slow_case);
|
||||
delayed()->nop();
|
||||
} else {
|
||||
// get eden boundaries
|
||||
// note: we need both top & top_addr!
|
||||
@ -3583,7 +3248,7 @@ void MacroAssembler::eden_allocate(
|
||||
// Compare obj with the value at top_addr; if still equal, swap the value of
|
||||
// end with the value at top_addr. If not equal, read the value at top_addr
|
||||
// into end.
|
||||
casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
|
||||
cas_ptr(top_addr, obj, end);
|
||||
// if someone beat us on the allocation, try again, otherwise continue
|
||||
cmp(obj, end);
|
||||
brx(Assembler::notEqual, false, Assembler::pn, retry);
|
||||
|
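[Editor's note, illustration only, not part of the changeset: the hunks above retire the V8-era casn()/casx_under_lock() helpers in favor of a single cas_ptr() that always emits a native V9 compare-and-swap at pointer width. A minimal stand-alone sketch of that "32-64 bit switch hitter" idea, written against the GCC/Clang __atomic builtins rather than HotSpot's assembler; every name below is ours, not the JDK's.]

    #include <stdint.h>

    // Compare-and-swap one pointer-sized word: if *addr == expected, store
    // desired; either way, return the value that was actually observed.
    // On a 32-bit target the compiler emits a 4-byte CAS (SPARC `cas`);
    // on a 64-bit target, an 8-byte CAS (SPARC `casx`).
    static inline intptr_t cas_ptr_sketch(volatile intptr_t* addr,
                                          intptr_t expected,
                                          intptr_t desired) {
      intptr_t seen = expected;
      __atomic_compare_exchange_n(addr, &seen, desired, /*weak=*/false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return seen;  // callers compare against `expected` to detect success
    }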
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -963,7 +963,7 @@ public:
inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);

using Assembler::swap;
inline void swap(Address& a, Register d, int offset = 0);
inline void swap(const Address& a, Register d, int offset = 0);

// address pseudos: make these names unlike instruction names to avoid confusion
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
@ -1056,13 +1056,6 @@ public:

void breakpoint_trap();
void breakpoint_trap(Condition c, CC cc);
void flush_windows_trap();
void clean_windows_trap();
void get_psr_trap();
void set_psr_trap();

// V8/V9 flush_windows
void flush_windows();

// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2);
@ -1071,14 +1064,6 @@ public:
void enter();
void leave();

// V8/V9 integer multiply
void mult(Register s1, Register s2, Register d);
void mult(Register s1, int simm13a, Register d);

// V8/V9 read and write of condition codes.
void read_ccr(Register d);
void write_ccr(Register s);

// Manipulation of C++ bools
// These are idioms to flag the need for care with accessing bools but on
// this platform we assume byte size
@ -1162,21 +1147,6 @@ public:
// check_and_forward_exception to handle exceptions when it is safe
void check_and_forward_exception(Register scratch_reg);

private:
// For V8
void read_ccr_trap(Register ccr_save);
void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);

#ifdef ASSERT
// For V8 debugging. Uses V8 instruction sequence and checks
// result with V9 instructions rdccr and wrccr.
// Uses Gscatch and Gscatch2
void read_ccr_v8_assert(Register ccr_save);
void write_ccr_v8_assert(Register ccr_save);
#endif // ASSERT

public:

// Write to card table for - register is destroyed afterwards.
void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);

@ -1314,20 +1284,9 @@ public:
FloatRegister Fa, FloatRegister Fb,
Register Rresult);

void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);

void save_all_globals_into_locals();
void restore_globals_from_locals();

void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
address lock_addr=0, bool use_call_vm=false);
void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
address lock_addr=0, bool use_call_vm=false);
void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;

// These set the icc condition code to equal if the lock succeeded
// and notEqual if it failed and requires a slow case
void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -229,10 +229,7 @@ inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Registe
// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@ -268,10 +265,7 @@ inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@ -334,7 +328,7 @@ inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder co

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@ -344,15 +338,7 @@ inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
rdpc(d);
return offset() - x;
}

@ -646,41 +632,26 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, Fl
// returns if membar generates anything, obviously this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
if (!os::is_MP())
return false; // Not needed on single CPU
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
if (!os::is_MP())
return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if (effective_mask != 0) {
Assembler::membar(effective_mask);
}
}

@ -748,7 +719,7 @@ inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d,
if (offset != 0) sub(d, offset, d);
}

inline void MacroAssembler::swap(Address& a, Register d, int offset) {
inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
else { swap(a.base(), a.disp() + offset, d); }
|
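[Editor's note, illustration only: the membar()/membar_has_effect() rewrite above drops the V8 stbar/ldstub fallback and keeps just the V9/TSO mask filtering. A hedged sketch of the surviving arithmetic; the enum values below are illustrative, only the ~(LoadLoad | LoadStore | StoreStore) filtering mirrors the diff.]

    #include <cstdio>

    enum MembarMaskBits {        // one bit per ordering constraint
      LoadLoad   = 1 << 0,
      StoreLoad  = 1 << 1,
      LoadStore  = 1 << 2,
      StoreStore = 1 << 3
    };

    // Under TSO only StoreLoad can actually be reordered, so every other
    // bit is implicitly satisfied and can be masked away before deciding
    // whether a membar instruction is needed at all.
    inline int effective_membar_mask(int const7a) {
      return const7a & ~(LoadLoad | LoadStore | StoreStore);
    }

    int main() {
      // A full fence still needs an instruction (StoreLoad survives)...
      std::printf("%d\n", effective_membar_mask(LoadLoad | StoreLoad) != 0);  // 1
      // ...but a release-style fence becomes a no-op on TSO.
      std::printf("%d\n", effective_membar_mask(LoadLoad | StoreStore) != 0); // 0
    }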
@ -162,7 +162,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
int i1 = ((int*)code_buffer)[1];
int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
// The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
n_call->set_long_at(1*BytesPerInstWord, i1);
@ -181,7 +181,7 @@ void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
// Make sure the first-patched instruction, which may co-exist
// briefly with the call, will do something harmless.
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
}

@ -933,11 +933,7 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
int code_size = 1 * BytesPerInstWord;
CodeBuffer cb(verified_entry, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
if (VM_Version::v9_instructions_work()) {
a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
} else {
a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
}
a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
ICache::invalidate_range(verified_entry, code_size);
}

@ -1024,7 +1020,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
int i1 = ((int*)code_buffer)[1];
int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
// The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
h_jump->set_long_at(1*BytesPerInstWord, i1);
@ -1043,6 +1039,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
// Make sure the first-patched instruction, which may co-exist
// briefly with the call, will do something harmless.
assert(inv_op(*contention_addr) == Assembler::arith_op ||
*contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
*contention_addr == nop_instruction(),
"must not interfere with original call");
}
|
@ -70,8 +70,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
bool is_zombie() {
int x = long_at(0);
return is_op3(x,
VM_Version::v9_instructions_work() ?
Assembler::ldsw_op3 : Assembler::lduw_op3,
Assembler::ldsw_op3,
Assembler::ldst_op)
&& Assembler::inv_rs1(x) == G0
&& Assembler::inv_rd(x) == O7;
|
@ -249,12 +249,10 @@ class FloatRegisterImpl: public AbstractRegisterImpl {

case D:
assert(c < 64 && (c & 1) == 0, "bad double float register");
assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
return (c & 0x1e) | ((c & 0x20) >> 5);

case Q:
assert(c < 64 && (c & 3) == 0, "bad quad float register");
assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
return (c & 0x1c) | ((c & 0x20) >> 5);
}
ShouldNotReachHere();
|
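[Editor's note, illustration only: the D and Q cases kept above encode a V9 floating-point register number into a 5-bit instruction field by folding bit 5 of the register number into bit 0. A small, checkable sketch of the double-register case; the function name is ours.]

    #include <cassert>

    // V9 addresses double registers f0..f62 (even numbers only) in 5 bits
    // by moving the high bit (0x20) of the register number into bit 0.
    inline int encode_double_reg(int c) {
      assert(c < 64 && (c & 1) == 0);  // "bad double float register"
      return (c & 0x1e) | ((c & 0x20) >> 5);
    }

    int main() {
      assert(encode_double_reg(2)  == 2);   // f2  -> 0b00010
      assert(encode_double_reg(32) == 1);   // f32 -> 0b00001
      assert(encode_double_reg(62) == 31);  // f62 -> 0b11111
      return 0;
    }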
@ -2459,7 +2459,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

// Finally just about ready to make the JNI call

__ flush_windows();
__ flushw();
if (inner_frame_created) {
__ restore();
} else {
|
@ -2778,10 +2778,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
Register Rold = reg_to_register_object($old$$reg);
Register Rnew = reg_to_register_object($new$$reg);

// casx_under_lock picks 1 of 3 encodings:
// For 32-bit pointers you get a 32-bit CAS
// For 64-bit pointers you get a 64-bit CASX
__ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
__ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
__ cmp( Rold, Rnew );
%}

@ -3067,7 +3064,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
AddressLiteral last_rethrow_addrlit(&last_rethrow);
__ sethi(last_rethrow_addrlit, L1);
Address addr(L1, last_rethrow_addrlit.low10());
__ get_pc(L2);
__ rdpc(L2);
__ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to
__ st_ptr(L2, addr);
__ restore();
|
@ -566,7 +566,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
address start = __ pc();

__ flush_windows();
__ flushw();
__ retl(false);
__ delayed()->add( FP, STACK_BIAS, O0 );
// The returned value must be a stack pointer whose register save area
@ -575,67 +575,9 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

// Helper functions for v8 atomic operations.
//
void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
if (mark_oop_reg == noreg) {
address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
__ set((intptr_t)lock_ptr, lock_ptr_reg);
} else {
assert(scratch_reg != noreg, "just checking");
address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
__ set((intptr_t)lock_ptr, lock_ptr_reg);
__ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
__ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
}
}

void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {

get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
__ set(StubRoutines::Sparc::locked, lock_reg);
// Initialize yield counter
__ mov(G0,yield_reg);

__ BIND(retry);
__ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);

// This code can only be called from inside the VM, this
// stub is only invoked from Atomic::add(). We do not
// want to use call_VM, because _last_java_sp and such
// must already be set.
//
// Save the regs and make space for a C call
__ save(SP, -96, SP);
__ save_all_globals_into_locals();
BLOCK_COMMENT("call os::naked_sleep");
__ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
__ delayed()->nop();
__ restore_globals_from_locals();
__ restore();
// reset the counter
__ mov(G0,yield_reg);

__ BIND(dontyield);

// try to get lock
__ swap(lock_ptr_reg, 0, lock_reg);

// did we get the lock?
__ cmp(lock_reg, StubRoutines::Sparc::unlocked);
__ br(Assembler::notEqual, true, Assembler::pn, retry);
__ delayed()->add(yield_reg,1,yield_reg);

// yes, got lock. do the operation here.
}

void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
__ st(lock_reg, lock_ptr_reg, 0); // unlock
}

// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
//
// Arguments :
// Arguments:
//
// exchange_value: O0
// dest: O1
@ -656,33 +598,14 @@ class StubGenerator: public StubCodeGenerator {
__ mov(O0, O3); // scratch copy of exchange value
__ ld(O1, 0, O2); // observe the previous value
// try to replace O2 with O3
__ cas_under_lock(O1, O2, O3,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);

__ retl(false);
__ delayed()->mov(O2, O0); // report previous value to caller

} else {
if (VM_Version::v9_instructions_work()) {
__ retl(false);
__ delayed()->swap(O1, 0, O0);
} else {
const Register& lock_reg = O2;
const Register& lock_ptr_reg = O3;
const Register& yield_reg = O4;

Label retry;
Label dontyield;

generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
// got the lock, do the swap
__ swap(O1, 0, O0);

generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
__ retl(false);
__ delayed()->nop();
}
__ retl(false);
__ delayed()->swap(O1, 0, O0);
}

return start;
@ -691,7 +614,7 @@ class StubGenerator: public StubCodeGenerator {

// Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
//
// Arguments :
// Arguments:
//
// exchange_value: O0
// dest: O1
@ -701,15 +624,12 @@ class StubGenerator: public StubCodeGenerator {
//
// O0: the value previously stored in dest
//
// Overwrites (v8): O3,O4,O5
//
address generate_atomic_cmpxchg() {
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
address start = __ pc();

// cmpxchg(dest, compare_value, exchange_value)
__ cas_under_lock(O1, O2, O0,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
__ cas(O1, O2, O0);
__ retl(false);
__ delayed()->nop();

@ -718,7 +638,7 @@ class StubGenerator: public StubCodeGenerator {

// Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
//
// Arguments :
// Arguments:
//
// exchange_value: O1:O0
// dest: O2
@ -728,17 +648,12 @@ class StubGenerator: public StubCodeGenerator {
//
// O1:O0: the value previously stored in dest
//
// This only works on V9, on V8 we don't generate any
// code and just return NULL.
//
// Overwrites: G1,G2,G3
//
address generate_atomic_cmpxchg_long() {
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
address start = __ pc();

if (!VM_Version::supports_cx8())
return NULL;;
__ sllx(O0, 32, O0);
__ srl(O1, 0, O1);
__ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value
@ -756,7 +671,7 @@ class StubGenerator: public StubCodeGenerator {

// Support for jint Atomic::add(jint add_value, volatile jint* dest).
//
// Arguments :
// Arguments:
//
// add_value: O0 (e.g., +1 or -1)
// dest: O1
@ -765,47 +680,22 @@ class StubGenerator: public StubCodeGenerator {
//
// O0: the new value stored in dest
//
// Overwrites (v9): O3
// Overwrites (v8): O3,O4,O5
// Overwrites: O3
//
address generate_atomic_add() {
StubCodeMark mark(this, "StubRoutines", "atomic_add");
address start = __ pc();
__ BIND(_atomic_add_stub);

if (VM_Version::v9_instructions_work()) {
Label(retry);
__ BIND(retry);
Label(retry);
__ BIND(retry);

__ lduw(O1, 0, O2);
__ add(O0, O2, O3);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
__ retl(false);
__ delayed()->add(O0, O2, O0); // note that cas made O2==O3
} else {
const Register& lock_reg = O2;
const Register& lock_ptr_reg = O3;
const Register& value_reg = O4;
const Register& yield_reg = O5;

Label(retry);
Label(dontyield);

generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
// got lock, do the increment
__ ld(O1, 0, value_reg);
__ add(O0, value_reg, value_reg);
__ st(value_reg, O1, 0);

// %%% only for RMO and PSO
__ membar(Assembler::StoreStore);

generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);

__ retl(false);
__ delayed()->mov(value_reg, O0);
}
__ lduw(O1, 0, O2);
__ add(O0, O2, O3);
__ cas(O1, O2, O3);
__ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
__ retl(false);
__ delayed()->add(O0, O2, O0); // note that cas made O2==O3

return start;
}
@ -841,7 +731,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(G3, L3);
__ mov(G4, L4);
__ mov(G5, L5);
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
__ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
}

@ -855,7 +745,7 @@ class StubGenerator: public StubCodeGenerator {
__ mov(L3, G3);
__ mov(L4, G4);
__ mov(L5, G5);
for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
for (i = 0; i < 64; i += 2) {
__ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
}

|
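[Editor's note, illustration only: with the V8 lock-based fallbacks deleted, generate_atomic_add() above is reduced to the V9 lduw/add/cas retry loop. The same algorithm in portable C++, as a sketch; the function name is ours and __sync_bool_compare_and_swap stands in for the cas instruction.]

    #include <stdint.h>

    // Atomically add `add_value` to *dest and return the new value, using
    // a compare-and-swap retry loop just like the stub: load the old value,
    // compute old + add, CAS it in, and retry if another thread won the race.
    static inline int32_t atomic_add_sketch(volatile int32_t* dest,
                                            int32_t add_value) {
      for (;;) {
        int32_t old_val = *dest;                // lduw
        int32_t new_val = old_val + add_value;  // add
        if (__sync_bool_compare_and_swap(dest, old_val, new_val)) {  // cas
          return new_val;                       // "note that cas made O2==O3"
        }
        // lost the race, retry
      }
    }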
@ -52,7 +52,3 @@ address StubRoutines::Sparc::_stop_subroutine_entry = NULL;
address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);

address StubRoutines::Sparc::_partial_subtype_check = NULL;

int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;

int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
|
@ -47,46 +47,14 @@ enum /* platform_dependent_constants */ {
class Sparc {
friend class StubGenerator;

public:
enum { nof_instance_allocators = 10 };

// allocator lock values
enum {
unlocked = 0,
locked = 1
};

enum {
v8_oop_lock_ignore_bits = 2,
v8_oop_lock_bits = 4,
nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
};

static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];

private:
static address _test_stop_entry;
static address _stop_subroutine_entry;
static address _flush_callers_register_windows_entry;

static int _atomic_memory_operation_lock;

static address _partial_subtype_check;

public:
// %%% global lock for everyone who needs to use atomic_compare_and_exchange
// %%% or atomic_increment -- should probably use more locks for more
// %%% scalability-- for instance one for each eden space or group of

// address of the lock for atomic_compare_and_exchange
static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }

// accessor and mutator for _atomic_memory_operation_lock
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }

// test assembler stop routine by setting registers
static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }

|
@ -1054,7 +1054,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// flush the windows now. We don't care about the current (protection) frame
// only the outer frames

__ flush_windows();
__ flushw();

// mark windows as flushed
Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
|
@ -1338,14 +1338,13 @@ void TemplateTable::lneg() {

void TemplateTable::fneg() {
transition(ftos, ftos);
__ fneg(FloatRegisterImpl::S, Ftos_f);
__ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
transition(dtos, dtos);
// v8 has fnegd if source and dest are the same
__ fneg(FloatRegisterImpl::D, Ftos_f);
__ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


@ -1470,19 +1469,10 @@ void TemplateTable::convert() {
__ st_long(Otos_l, __ d_tmp);
__ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

if (VM_Version::v9_instructions_work()) {
if (bytecode() == Bytecodes::_l2f) {
__ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
} else {
__ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
}
if (bytecode() == Bytecodes::_l2f) {
__ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
} else {
__ call_VM_leaf(
Lscratch,
bytecode() == Bytecodes::_l2f
? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
: CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
);
__ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
}
break;

@ -1490,11 +1480,6 @@ void TemplateTable::convert() {
Label isNaN;
// result must be 0 if value is NaN; test by comparing value to itself
__ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
// According to the v8 manual, you have to have a non-fp instruction
// between fcmp and fb.
if (!VM_Version::v9_instructions_work()) {
__ nop();
}
__ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
__ delayed()->clr(Otos_i); // NaN
__ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
@ -1537,16 +1522,7 @@ void TemplateTable::convert() {
break;

case Bytecodes::_d2f:
if (VM_Version::v9_instructions_work()) {
__ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
}
else {
// must uncache tos
__ push_d();
__ pop_i(O0);
__ pop_i(O1);
__ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
}
break;

default: ShouldNotReachHere();
@ -1956,17 +1932,8 @@ void TemplateTable::fast_binaryswitch() {
__ ld( Rarray, Rscratch, Rscratch );
// (Rscratch is already in the native byte-ordering.)
__ cmp( Rkey, Rscratch );
if ( VM_Version::v9_instructions_work() ) {
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
}
else {
Label end_of_if;
__ br( Assembler::less, true, Assembler::pt, end_of_if );
__ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
__ mov( Rh, Ri ); // else i = h
__ bind(end_of_if); // }
}
__ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
__ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())

// while (i+1 < j)
__ bind( entry );
@ -3418,9 +3385,7 @@ void TemplateTable::_new() {
// has been allocated.
__ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

__ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
VM_Version::v9_instructions_work() ? NULL :
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
__ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

// if someone beat us on the allocation, try again, otherwise continue
__ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
@ -3701,14 +3666,7 @@ void TemplateTable::monitorenter() {

__ verify_oop(O4); // verify each monitor's oop
__ tst(O4); // is this entry unused?
if (VM_Version::v9_instructions_work())
__ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
else {
Label L;
__ br( Assembler::zero, true, Assembler::pn, L );
__ delayed()->mov(O3, O1); // remember this one if match
__ bind(L);
}
__ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);

__ cmp(O4, O0); // check if current entry is for same object
__ brx( Assembler::equal, false, Assembler::pn, exit );
|
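[Editor's note, illustration only: several hunks above replace a branch-over-move idiom with V9 movcc conditional moves. For the binary-search step in fast_binaryswitch() the semantics reduce to two conditional assignments; a sketch in plain C++ with names of ours, where on V9 each assignment is a single branchless movcc.]

    // One step of the binary search over the jump table: narrow [i, j)
    // around probe index h depending on how the key compares.
    inline void binary_search_step(int key, int probe, int& i, int& j, int h) {
      if (key < probe) j = h;  // movcc(less, ...):         j = h
      else             i = h;  // movcc(greaterEqual, ...): i = h
    }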
@ -75,23 +75,14 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
}

if (has_v9()) {
assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
if (ArraycopySrcPrefetchDistance >= 4096)
ArraycopySrcPrefetchDistance = 4064;
assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
if (ArraycopyDstPrefetchDistance >= 4096)
ArraycopyDstPrefetchDistance = 4064;
} else {
if (ArraycopySrcPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
}
if (ArraycopyDstPrefetchDistance > 0) {
warning("prefetch instructions are not available on this CPU");
FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
}
}
guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");

assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
if (ArraycopySrcPrefetchDistance >= 4096)
ArraycopySrcPrefetchDistance = 4064;
assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
if (ArraycopyDstPrefetchDistance >= 4096)
ArraycopyDstPrefetchDistance = 4064;

UseSSE = 0; // Only on x86 and x64

|
@ -177,10 +177,6 @@ public:
return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
}

// Legacy
static bool v8_instructions_work() { return has_v8() && !has_v9(); }
static bool v9_instructions_work() { return has_v9(); }

// Assembler testing
static void allow_all();
static void revert();
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);

define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
// Very large C++ stack frames using solaris-amd64 optimized builds
|
@ -1429,6 +1429,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");

__ block_comment("unpack_array_argument {");

// Pass the length, ptr pair
Label is_null, done;
VMRegPair tmp;
@ -1453,6 +1455,8 @@ static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType
move_ptr(masm, tmp, body_arg);
move32_64(masm, tmp, length_arg);
__ bind(done);

__ block_comment("} unpack_array_argument");
}


@ -2170,27 +2174,34 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
}

// point c_arg at the first arg that is already loaded in case we
// need to spill before we call out
int c_arg = total_c_args - total_in_args;
int c_arg;

// Pre-load a static method's oop into r14. Used both by locking code and
// the normal JNI call code.
if (method->is_static() && !is_critical_native) {
if (!is_critical_native) {
// point c_arg at the first arg that is already loaded in case we
// need to spill before we call out
c_arg = total_c_args - total_in_args;

// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
if (method->is_static()) {

// Now handlize the static class mirror it's known not-null.
__ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));

// Now get the handle
__ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
__ movptr(c_rarg1, oop_handle_reg);
// and protect the arg if we must spill
c_arg--;
// Now handlize the static class mirror it's known not-null.
__ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

// Now get the handle
__ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
__ movptr(c_rarg1, oop_handle_reg);
// and protect the arg if we must spill
c_arg--;
}
} else {
// For JNI critical methods we need to save all registers in save_args.
c_arg = 0;
}

// Change state to native (we save the return address in the thread, since it might not
|
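[Editor's note, illustration only: the native-wrapper restructuring above mainly hoists the critical-native test so that c_arg is initialized on every path before use. The control flow, schematically; a sketch only, not the wrapper itself.]

    static int compute_c_arg(int total_c_args, int total_in_args,
                             bool is_critical_native, bool is_static) {
      int c_arg;
      if (!is_critical_native) {
        // point c_arg at the first arg that is already loaded
        c_arg = total_c_args - total_in_args;
        if (is_static) {
          c_arg--;  // the handlized class mirror takes one C argument slot
        }
      } else {
        c_arg = 0;  // critical natives save every register in save_args
      }
      return c_arg;
    }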
@ -83,7 +83,7 @@ class StubGenerator: public StubCodeGenerator {
private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#define inc_counter_np(counter) ((void)0)
#else
void inc_counter_np_(int& counter) {
__ incrementl(ExternalAddress((address)&counter));
|
@ -81,7 +81,7 @@ class StubGenerator: public StubCodeGenerator {
private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#define inc_counter_np(counter) ((void)0)
#else
void inc_counter_np_(int& counter) {
// This can destroy rscratch1 if counter is far from the code cache
|
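[Editor's note, illustration only: both stub generators above redefine the PRODUCT no-op as ((void)0) instead of (0), which stays legal in statement position but no longer has a value for -Wunused-value to flag. A tiny stand-alone illustration; the non-PRODUCT branch below is ours.]

    #include <cstdio>

    #ifdef PRODUCT
    #define inc_counter_np(counter) ((void)0)  // no-op with no unused value
    #else
    #define inc_counter_np(counter) ((counter)++)
    #endif

    int main() {
      int calls = 0;
      inc_counter_np(calls);  // fine in both builds; a bare (0); would warn
      std::printf("%d\n", calls);
    }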
@ -122,9 +122,7 @@ static int file_open(const char* path, int flag) {
}

static int file_close(int fd) {
int ret;
RESTARTABLE(close(fd), ret);
return ret;
return close(fd);
}

static int file_read(int fd, char* buf, int len) {
@ -199,7 +199,7 @@ int BsdAttachListener::init() {
::unlink(initial_path);
int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
return -1;
}

@ -217,7 +217,7 @@ int BsdAttachListener::init() {
}
}
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
::unlink(initial_path);
return -1;
}
@ -345,24 +345,21 @@ BsdAttachOperation* BsdAttachListener::dequeue() {
uid_t puid;
gid_t pgid;
if (::getpeereid(s, &puid, &pgid) != 0) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
uid_t euid = geteuid();
gid_t egid = getegid();

if (puid != euid || pgid != egid) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}

// peer credentials look okay so we read the request
BsdAttachOperation* op = read_request(s);
if (op == NULL) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
} else {
return op;
@ -413,7 +410,7 @@ void BsdAttachOperation::complete(jint result, bufferedStream* st) {
}

// done
RESTARTABLE(::close(this->socket()), rc);
::close(this->socket());

// were we externally suspended while we were waiting?
thread->check_and_wait_while_suspended();
|
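[Editor's note, illustration only: the RESTARTABLE(::close(...), res) wrappers disappear throughout these files. The usual rationale is that by the time close() reports EINTR the descriptor may already be released, so retrying can race with another thread and close an unrelated, freshly reused fd. A before/after sketch; the RESTARTABLE macro below is simplified from the pattern visible in this diff.]

    #include <cerrno>
    #include <unistd.h>

    #define RESTARTABLE(_cmd, _result) do { \
        _result = _cmd;                     \
      } while (_result == -1 && errno == EINTR)

    static int close_old(int fd) {  // pre-patch: retry close() on EINTR
      int ret;
      RESTARTABLE(::close(fd), ret);
      return ret;
    }

    static int close_new(int fd) {  // post-patch: call once, take the result
      return ::close(fd);
    }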
@ -2074,6 +2074,13 @@ void bsd_wrap_code(char* base, size_t size) {
}
}

static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
}

// NOTE: Bsd kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
@ -2082,18 +2089,45 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#ifdef __OpenBSD__
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
return ::mprotect(addr, size, prot) == 0;
if (::mprotect(addr, size, prot) == 0) {
return true;
}
#else
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
if (res != (uintptr_t) MAP_FAILED) {
return true;
}
#endif
}

// Warn about any commit errors we see in non-product builds just
// in case mmap() doesn't work as described on the man page.
NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)

return false;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
return commit_memory(addr, size, exec);
// alignment_hint is ignored on this OS
return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
const char* mesg) {
assert(mesg != NULL, "mesg must be specified");
if (!pd_commit_memory(addr, size, exec)) {
// add extra info in product mode for vm_exit_out_of_memory():
PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
}
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint, bool exec,
const char* mesg) {
// alignment_hint is ignored on this OS
pd_commit_memory_or_exit(addr, size, exec, mesg);
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@ -2148,7 +2182,7 @@ bool os::pd_uncommit_memory(char* addr, size_t size) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size);
return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
@ -2320,21 +2354,20 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
}

// The memory is committed
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);

return addr;
}

bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
MemTracker::record_virtual_memory_uncommit((address)base, bytes);
MemTracker::record_virtual_memory_release((address)base, bytes);
tkr.record((address)base, bytes);
return true;
} else {
tkr.discard();
return false;
}

@ -3512,7 +3545,7 @@ jint os::init_2(void)

if (!UseMembar) {
address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
|
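[Editor's note, illustration only: pd_commit_memory() above now funnels every mmap/mprotect failure through warn_fail_commit_memory() before returning false. A condensed stand-alone sketch of that control flow, using plain POSIX calls and Linux-flavored mmap flags, with the HotSpot macros omitted.]

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>

    static void warn_fail_commit(void* addr, size_t size, bool exec, int err) {
      std::fprintf(stderr,
                   "INFO: commit_memory(%p, %zu, %d) failed; error='%s' (errno=%d)\n",
                   addr, size, (int)exec, std::strerror(err), err);
    }

    // Commit previously reserved pages by remapping them read/write(/exec).
    static bool commit_memory_sketch(char* addr, size_t size, bool exec) {
      int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC
                      : PROT_READ | PROT_WRITE;
      void* res = ::mmap(addr, size, prot,
                         MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
      if (res != MAP_FAILED) {
        return true;
      }
      warn_fail_commit(addr, size, exec, errno);  // diagnose before failing
      return false;
    }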
@ -178,11 +178,11 @@ inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
}

inline int os::close(int fd) {
RESTARTABLE_RETURN_INT(::close(fd));
return ::close(fd);
}

inline int os::socket_close(int fd) {
RESTARTABLE_RETURN_INT(::close(fd));
return ::close(fd);
}

inline int os::socket(int domain, int type, int protocol) {
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
}

// commit memory
if (!os::commit_memory(mapAddress, size)) {
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
@ -120,7 +120,7 @@ static void save_memory_to_file(char* addr, size_t size) {
addr += result;
}

RESTARTABLE(::close(fd), result);
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -632,7 +632,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
}
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}

@ -656,7 +656,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (result != -1) {
return fd;
} else {
RESTARTABLE(::close(fd), result);
::close(fd);
return -1;
}
}
@ -734,9 +734,7 @@ static char* mmap_create_shared(size_t size) {

mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

// attempt to close the file - restart it if it was interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");

if (mapAddress == MAP_FAILED) {
@ -755,8 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

return mapAddress;
}
@ -909,7 +906,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor

// attempt to close the file - restart if it gets interrupted,
// but ignore other failures
RESTARTABLE(::close(fd), result);
result = ::close(fd);
assert(result != OS_ERR, "could not close file");

if (mapAddress == MAP_FAILED) {
@ -921,8 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

*addr = mapAddress;
*sizep = size;
|
@ -199,7 +199,7 @@ int LinuxAttachListener::init() {
::unlink(initial_path);
int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
return -1;
}

@ -212,7 +212,7 @@ int LinuxAttachListener::init() {
}
}
if (res == -1) {
RESTARTABLE(::close(listener), res);
::close(listener);
::unlink(initial_path);
return -1;
}
@ -340,24 +340,21 @@ LinuxAttachOperation* LinuxAttachListener::dequeue() {
struct ucred cred_info;
socklen_t optlen = sizeof(cred_info);
if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}
uid_t euid = geteuid();
gid_t egid = getegid();

if (cred_info.uid != euid || cred_info.gid != egid) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
}

// peer credentials look okay so we read the request
LinuxAttachOperation* op = read_request(s);
if (op == NULL) {
int res;
RESTARTABLE(::close(s), res);
::close(s);
continue;
} else {
return op;
@ -408,7 +405,7 @@ void LinuxAttachOperation::complete(jint result, bufferedStream* st) {
}

// done
RESTARTABLE(::close(this->socket()), rc);
::close(this->socket());

// were we externally suspended while we were waiting?
thread->check_and_wait_while_suspended();
|
@ -2612,11 +2612,49 @@ void linux_wrap_code(char* base, size_t size) {
|
||||
}
|
||||
}
|
||||
|
||||
static bool recoverable_mmap_error(int err) {
|
||||
// See if the error is one we can let the caller handle. This
|
||||
// list of errno values comes from JBS-6843484. I can't find a
|
||||
// Linux man page that documents this specific set of errno
|
||||
// values so while this list currently matches Solaris, it may
|
||||
// change as we gain experience with this failure mode.
|
||||
switch (err) {
|
||||
case EBADF:
|
||||
case EINVAL:
|
||||
case ENOTSUP:
|
||||
// let the caller deal with these errors
|
||||
return true;
|
||||
|
||||
default:
|
||||
// Any remaining errors on this OS can cause our reserved mapping
|
||||
// to be lost. That can cause confusion where different data
|
||||
// structures think they have the same memory mapped. The worst
|
||||
// scenario is if both the VM and a library think they have the
|
||||
// same memory mapped.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}

static void warn_fail_commit_memory(char* addr, size_t size,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
          alignment_hint, exec, strerror(err), err);
}

// NOTE: Linux kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@ -2624,9 +2662,32 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, size);
    }
    return true;
    return 0;
  }

  int err = errno;  // save errno from mmap() call above

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
  return false;
}

// Define MAP_HUGETLB here so we can build HotSpot on old systems.
@ -2639,8 +2700,9 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
#define MADV_HUGEPAGE 14
#endif

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
int os::Linux::commit_memory_impl(char* addr, size_t size,
                                  size_t alignment_hint, bool exec) {
  int err;
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    uintptr_t res =
@ -2651,16 +2713,46 @@ bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
      if (UseNUMAInterleaving) {
        numa_make_global(addr, size);
      }
      return true;
      return 0;
    }

    err = errno;  // save errno from mmap() call above

    if (!recoverable_mmap_error(err)) {
      // However, it is not clear that this loss of our reserved mapping
      // happens with large pages on Linux or that we cannot recover
      // from the loss. For now, we just issue a warning and we don't
      // call vm_exit_out_of_memory(). This issue is being tracked by
      // JBS-8007074.
      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
//                          "committing reserved memory.");
    }
    // Fall through and try to use small pages
  }

  if (commit_memory(addr, size, exec)) {
  err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err == 0) {
    realign_memory(addr, size, alignment_hint);
    return true;
  }
  return false;
  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}
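
The split above gives callers two styles: os::commit_memory() returns false only for errors classified as recoverable (anything else already aborts inside commit_memory_impl), while os::commit_memory_or_exit() turns every failure into a VM exit carrying a caller-supplied message. A minimal sketch of the two calling styles — addr/bytes are hypothetical here, and !ExecMem is the named boolean constant used throughout these hunks in place of a bare false:

    // Caller that can fall back (e.g. retry with small pages):
    if (!os::commit_memory(addr, bytes, !ExecMem)) {
      // failure was recoverable per recoverable_mmap_error(); try plan B
    }

    // Caller with no fallback: exits via vm_exit_out_of_memory on failure.
    os::commit_memory_or_exit(addr, bytes, !ExecMem, "card table expansion");
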
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
@ -2678,7 +2770,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // small pages on top of the SHM segment. This method always works for small pages, so we
  // allow that in any case.
  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
    commit_memory(addr, bytes, alignment_hint, false);
    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  }
}

@ -2931,7 +3023,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
    ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
  }

  return os::commit_memory(addr, size);
  return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
@ -3053,7 +3145,7 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                  -1, 0);

  if (p != (void *) -1) {
  if (p != MAP_FAILED) {
    // We don't know if this really is a huge page or not.
    FILE *fp = fopen("/proc/self/maps", "r");
    if (fp) {
@ -3271,22 +3363,21 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  }

  // The memory is committed
  address pc = CALLER_PC;
  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);

  return addr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  if (rslt == 0) {
    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
    MemTracker::record_virtual_memory_release((address)base, bytes);
    tkr.record((address)base, bytes);
    return true;
  } else {
    return false;
    tkr.discard();
    return false;
  }
}

@ -4393,7 +4484,7 @@ jint os::init_2(void)

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
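
Two of the fixes above correct the same pitfall: on failure mmap(2) returns MAP_FAILED, which is (void*)-1, not NULL, so a NULL check never fires. A self-contained sketch of the correct check:

    #include <sys/mman.h>
    #include <stdio.h>

    int main() {
      void* p = ::mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {   // correct: a failed mmap() never returns NULL
        perror("mmap");
        return 1;
      }
      ::munmap(p, 4096);
      return 0;
    }
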
@ -76,6 +76,10 @@ class Linux {
  static julong physical_memory() { return _physical_memory; }
  static void initialize_system_info();

  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
  static int commit_memory_impl(char* addr, size_t bytes,
                                size_t alignment_hint, bool exec);

  static void set_glibc_version(const char *s) { _glibc_version = s; }
  static void set_libpthread_version(const char *s) { _libpthread_version = s; }
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ static char* create_standard_memory(size_t size) {
  }

  // commit memory
  if (!os::commit_memory(mapAddress, size)) {
  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
    if (PrintMiscellaneous && Verbose) {
      warning("Could not commit PerfData memory\n");
    }
@ -120,7 +120,7 @@ static void save_memory_to_file(char* addr, size_t size) {
    addr += result;
  }

  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  if (PrintMiscellaneous && Verbose) {
    if (result == OS_ERR) {
      warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -632,7 +632,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
    if (PrintMiscellaneous && Verbose) {
      warning("could not set shared memory file size: %s\n", strerror(errno));
    }
    RESTARTABLE(::close(fd), result);
    ::close(fd);
    return -1;
  }

@ -656,7 +656,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
  if (result != -1) {
    return fd;
  } else {
    RESTARTABLE(::close(fd), result);
    ::close(fd);
    return -1;
  }
}
@ -734,9 +734,7 @@ static char* mmap_create_shared(size_t size) {

  mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

  // attempt to close the file - restart it if it was interrupted,
  // but ignore other failures
  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  assert(result != OS_ERR, "could not close file");

  if (mapAddress == MAP_FAILED) {
@ -755,8 +753,7 @@ static char* mmap_create_shared(size_t size) {
  (void)::memset((void*) mapAddress, 0, size);

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

  return mapAddress;
}
@ -907,9 +904,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor

  mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);

  // attempt to close the file - restart if it gets interrupted,
  // but ignore other failures
  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  assert(result != OS_ERR, "could not close file");

  if (mapAddress == MAP_FAILED) {
@ -921,8 +916,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
  }

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

  *addr = mapAddress;
  *sizep = size;
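
The RESTARTABLE removals in this file follow from close(2) semantics: if close() fails with EINTR, the state of the descriptor is unspecified (on Linux it has already been released), so retrying risks closing a descriptor that another thread has just been handed by the kernel. The safe shape is a single call whose result is at most logged — a sketch:

    // Old, problematic shape: retry while errno == EINTR.
    //   int result;
    //   do { result = ::close(fd); } while (result == -1 && errno == EINTR);

    // New shape: close exactly once; treat the result as advisory.
    int result = ::close(fd);
    if (result == -1) {
      // log if desired, but never call close(fd) again
    }
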
@ -122,9 +122,7 @@ static int file_open(const char* path, int flag) {
}

static int file_close(int fd) {
  int ret;
  RESTARTABLE(close(fd), ret);
  return ret;
  return close(fd);
}

static int file_read(int fd, char* buf, int len) {
@ -392,7 +392,7 @@ int SolarisAttachListener::create_door() {
      return -1;
    }
    assert(fd >= 0, "bad file descriptor");
    RESTARTABLE(::close(fd), res);
    ::close(fd);

    // attach the door descriptor to the file
    if ((res = ::fattach(dd, initial_path)) == -1) {
@ -410,7 +410,7 @@ int SolarisAttachListener::create_door() {
  // rename file so that clients can attach
  if (dd >= 0) {
    if (::rename(initial_path, door_path) == -1) {
      RESTARTABLE(::close(dd), res);
      ::close(dd);
      ::fdetach(initial_path);
      dd = -1;
    }
@ -549,7 +549,7 @@ void SolarisAttachOperation::complete(jint res, bufferedStream* st) {
  }

  // close socket and we're done
  RESTARTABLE(::close(this->socket()), rc);
  ::close(this->socket());

  // were we externally suspended while we were waiting?
  thread->check_and_wait_while_suspended();
@ -2784,7 +2784,42 @@ int os::vm_allocation_granularity() {
  return page_size;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
@ -2792,14 +2827,38 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return true;
    return 0;
  }
  return false;

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  if (commit_memory(addr, bytes, exec)) {
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
@ -2821,9 +2880,25 @@ bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
      // Since this is a hint, ignore any failures.
      (void)Solaris::set_mpss_range(addr, bytes, page_size);
    }
    return true;
  }
  return false;
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
@ -2835,7 +2910,7 @@ void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
@ -3457,22 +3532,21 @@ char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
  }

  // The memory is committed
  address pc = CALLER_PC;
  MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc);
  MemTracker::record_virtual_memory_commit((address)retAddr, size, pc);
  MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC);

  return retAddr;
}

bool os::release_memory_special(char* base, size_t bytes) {
  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  if (rslt == 0) {
    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
    MemTracker::record_virtual_memory_release((address)base, bytes);
    tkr.record((address)base, bytes);
    return true;
  } else {
    return false;
    tkr.discard();
    return false;
  }
}

@ -6604,11 +6678,11 @@ size_t os::write(int fd, const void *buf, unsigned int nBytes) {
}

int os::close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
  return ::close(fd);
}

int os::socket_close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
  return ::close(fd);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
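
As on Linux, the Solaris release path now brackets the OS call with a MemTracker::Tracker so NMT bookkeeping is committed only if the release succeeds. The same record-or-discard shape applies wherever NMT must mirror an OS-level release — a condensed sketch, with unmap_region() standing in as a hypothetical OS-level release:

    MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    if (unmap_region(base, bytes)) {     // hypothetical OS-level release
      tkr.record((address)base, bytes);  // success: commit the NMT record
    } else {
      tkr.discard();                     // failure: drop the pending record
    }
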
@ -168,6 +168,9 @@ class Solaris {
  static int _dev_zero_fd;
  static int get_dev_zero_fd() { return _dev_zero_fd; }
  static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
  static int commit_memory_impl(char* addr, size_t bytes,
                                size_t alignment_hint, bool exec);
  static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
  static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
  static bool mpss_sanity_check(bool warn, size_t * page_size);
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -62,7 +62,7 @@ static char* create_standard_memory(size_t size) {
  }

  // commit memory
  if (!os::commit_memory(mapAddress, size)) {
  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
    if (PrintMiscellaneous && Verbose) {
      warning("Could not commit PerfData memory\n");
    }
@ -122,7 +122,7 @@ static void save_memory_to_file(char* addr, size_t size) {
    addr += result;
  }

  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  if (PrintMiscellaneous && Verbose) {
    if (result == OS_ERR) {
      warning("Could not close %s: %s\n", destfile, strerror(errno));
@ -437,7 +437,7 @@ static char* get_user_name(int vmid, TRAPS) {
    addr+=result;
  }

  RESTARTABLE(::close(fd), result);
  ::close(fd);

  // get the user name for the effective user id of the process
  char* user_name = get_user_name(psinfo.pr_euid);
@ -669,7 +669,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
    if (PrintMiscellaneous && Verbose) {
      warning("could not set shared memory file size: %s\n", strerror(errno));
    }
    RESTARTABLE(::close(fd), result);
    ::close(fd);
    return -1;
  }

@ -749,9 +749,7 @@ static char* mmap_create_shared(size_t size) {

  mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

  // attempt to close the file - restart it if it was interrupted,
  // but ignore other failures
  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  assert(result != OS_ERR, "could not close file");

  if (mapAddress == MAP_FAILED) {
@ -770,8 +768,7 @@ static char* mmap_create_shared(size_t size) {
  (void)::memset((void*) mapAddress, 0, size);

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

  return mapAddress;
}
@ -922,9 +919,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor

  mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);

  // attempt to close the file - restart if it gets interrupted,
  // but ignore other failures
  RESTARTABLE(::close(fd), result);
  result = ::close(fd);
  assert(result != OS_ERR, "could not close file");

  if (mapAddress == MAP_FAILED) {
@ -936,8 +931,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
  }

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

  *addr = mapAddress;
  *sizep = size;
@ -2524,7 +2524,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
          addr = (address)((uintptr_t)addr &
                           (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
          os::commit_memory((char *)addr, thread->stack_base() - addr,
                            false );
                            !ExecMem);
          return EXCEPTION_CONTINUE_EXECUTION;
        }
        else
@ -2875,7 +2875,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                PAGE_READWRITE);
    // If reservation failed, return NULL
    if (p_buf == NULL) return NULL;
    MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
    MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
    os::release_memory(p_buf, bytes + chunk_size);

    // we still need to round up to a page boundary (in case we are using large pages)
@ -2941,7 +2941,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
                                                  bytes_to_release, mtNone, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
@ -2961,9 +2961,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
    // Although the memory is allocated individually, it is returned as one.
    // NMT records it as one block.
    address pc = CALLER_PC;
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
    if ((flags & MEM_COMMIT) != 0) {
      MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
      MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
    } else {
      MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
    }

    // made it this far, success
@ -3154,8 +3155,7 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
    if (res != NULL) {
      address pc = CALLER_PC;
      MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
      MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
    }

    return res;
@ -3164,14 +3164,21 @@ char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  // Memory allocated via reserve_memory_special() is committed
  MemTracker::record_virtual_memory_uncommit((address)base, bytes);
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
@ -3186,11 +3193,17 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {
@ -3205,12 +3218,20 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
@ -3222,7 +3243,24 @@ bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return commit_memory(addr, size, exec);
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
@ -3240,7 +3278,7 @@ bool os::pd_release_memory(char* addr, size_t bytes) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
@ -3264,8 +3302,9 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
    fatal("cannot commit protection page");
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
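
On Windows, committing is a second VirtualAlloc pass (MEM_COMMIT) over an already reserved range, which is why the NUMA-interleaved path above must walk and commit each VirtualAlloc-ed chunk separately. A self-contained sketch of the basic reserve-then-commit sequence:

    #include <windows.h>
    #include <stdio.h>

    int main() {
      SIZE_T bytes = 1 << 20;
      // Reserve address space only; no storage is charged yet.
      void* p = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_NOACCESS);
      if (p == NULL) return 1;
      // Commit within the reserved region; this is what pd_commit_memory does.
      if (VirtualAlloc(p, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
        printf("commit failed: %lu\n", GetLastError());
        return 1;
      }
      VirtualFree(p, 0, MEM_RELEASE);
      return 0;
    }
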
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ static char* create_standard_memory(size_t size) {
  }

  // commit memory
  if (!os::commit_memory(mapAddress, size)) {
  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
    if (PrintMiscellaneous && Verbose) {
      warning("Could not commit PerfData memory\n");
    }
@ -1498,8 +1498,7 @@ static char* mapping_create_shared(size_t size) {
  (void)memset(mapAddress, '\0', size);

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);

  return (char*) mapAddress;
}
@ -1681,8 +1680,7 @@ static void open_file_mapping(const char* user, int vmid,
  }

  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);


  *addrp = (char*)mapAddress;
@ -1836,9 +1834,10 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
    return;
  }

  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  remove_file_mapping(addr);
  // it does not go through os api, the operation has to record from here
  MemTracker::record_virtual_memory_release((address)addr, bytes);
  tkr.record((address)addr, bytes);
}

char* PerfMemory::backing_store_filename() {
@ -1,47 +0,0 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

#include <asm-sparc/traps.h>

void MacroAssembler::read_ccr_trap(Register ccr_save) {
  // No implementation
  breakpoint_trap();
}

void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
  // No implementation
  breakpoint_trap();
}

void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }

// Use software breakpoint trap until we figure out how to do this on Linux
void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); }
void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); }
@ -169,7 +169,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
    : "memory");
  return rv;
#else
  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
  volatile jlong_accessor evl, cvl, rv;
  evl.long_value = exchange_value;
  cvl.long_value = compare_value;
@ -1,61 +0,0 @@
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

#include <sys/trap.h>           // For trap numbers
#include <v9/sys/psr_compat.h>  // For V8 compatibility

void MacroAssembler::read_ccr_trap(Register ccr_save) {
  // Execute a trap to get the PSR, mask and shift
  // to get the condition codes.
  get_psr_trap();
  nop();
  set(PSR_ICC, ccr_save);
  and3(O0, ccr_save, ccr_save);
  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
}

void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
  // Execute a trap to get the PSR, shift back
  // the condition codes, mask the condition codes
  // back into the PSR and trap to write back the
  // PSR.
  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
  get_psr_trap();
  nop();
  set(~PSR_ICC, scratch1);
  and3(O0, scratch1, O0);
  or3(O0, scratch2, O0);
  set_psr_trap();
  nop();
}

void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
void MacroAssembler::get_psr_trap() { trap(ST_GETPSR); }
void MacroAssembler::set_psr_trap() { trap(ST_SETPSR); }
@ -60,21 +60,10 @@ inline jlong Atomic::load(volatile jlong* src) { return *src; }

#else

extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);

inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
#ifdef COMPILER2
  // Compiler2 does not support v8, it is used only for v9.
  _Atomic_move_long_v9(src, dst);
#else
  // The branch is cheaper than emulated LDD.
  if (VM_Version::v9_instructions_work()) {
    _Atomic_move_long_v9(src, dst);
  } else {
    _Atomic_move_long_v8(src, dst);
  }
#endif
}

inline jlong Atomic::load(volatile jlong* src) {
@ -209,7 +198,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
    : "memory");
  return rv;
#else  //_LP64
  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
  volatile jlong_accessor evl, cvl, rv;
  evl.long_value = exchange_value;
  cvl.long_value = compare_value;
@ -318,7 +306,6 @@ inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong*
  // Return 64 bit value in %o0
  return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
#else  // _LP64
  assert (VM_Version::v9_instructions_work(), "only supported on v9");
  // Return 64 bit value in %o0,%o1 by hand
  return _Atomic_casl(exchange_value, dest, compare_value);
#endif // _LP64
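
With the V8 support removed, the #ifdef COMPILER2 branching above collapses; the resulting function, reconstructed from the surviving lines, is simply the unconditional V9 path:

    extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);

    inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
      _Atomic_move_long_v9(src, dst);
    }
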
@ -152,23 +152,6 @@
  .nonvolatile
  .end

  // Support for jlong Atomic::load and Atomic::store on v8.
  //
  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
  //
  // Arguments:
  //      src:  O0
  //      dest: O1
  //
  // Overwrites O2 and O3

  .inline _Atomic_move_long_v8,2
  .volatile
  ldd [%o0], %o2
  std %o2, [%o1]
  .nonvolatile
  .end

  // Support for jlong Atomic::load and Atomic::store on v9.
  //
  // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
@ -235,6 +235,9 @@ bool InstructForm::is_parm(FormDict &globals) {
  return false;
}

bool InstructForm::is_ideal_negD() const {
  return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
}

// Return 'true' if this instruction matches an ideal 'Copy*' node
int InstructForm::is_ideal_copy() const {
@ -533,6 +536,12 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
  if( data_type != Form::none )
    rematerialize = true;

  // Ugly: until a better fix is implemented, disable rematerialization for
  // negD nodes because they are proved to be problematic.
  if (is_ideal_negD()) {
    return false;
  }

  // Constants
  if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
    rematerialize = true;
@ -147,6 +147,7 @@ public:
  virtual int  is_empty_encoding() const;  // _size=0 and/or _insencode empty
  virtual int  is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
  virtual int  is_ideal_copy() const;      // node matches ideal 'Copy*'
  virtual bool is_ideal_negD() const;      // node matches ideal 'NegD'
  virtual bool is_ideal_if()   const;      // node matches ideal 'If'
  virtual bool is_ideal_fastlock() const;  // node matches 'FastLock'
  virtual bool is_ideal_membar() const;    // node matches ideal 'MemBarXXX'
@ -506,7 +506,7 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
  _loop_map(0, 0), // initialized later with correct size
  _compilation(c)
{
  TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
  TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));

  init_visited();
  count_edges(start_block, NULL);
@ -683,7 +683,7 @@ void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
}

void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight"));
  init_visited();

  assert(_work_list.is_empty(), "work list must be empty before processing");
@ -868,7 +868,7 @@ void ComputeLinearScanOrder::append_block(BlockBegin* cur) {
}

void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, "----- computing final block order");
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order"));

  // the start block is always the first block in the linear scan order
  _linear_scan_order = new BlockList(_num_blocks);
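
All three fixes stem from TRACE_LINEAR_SCAN taking a statement rather than a format string: passing a bare string literal compiles, but the expansion evaluates it as an unused expression that prints nothing (and now trips the -Wunused-value warning enabled elsewhere in this merge). The macro is shaped roughly like this sketch — the real definition lives in the C1 linear-scan sources, so treat the exact form as an assumption:

    #ifndef PRODUCT
      #define TRACE_LINEAR_SCAN(level, code) \
        if (TraceLinearScanLevel >= level) { code; }
    #else
      #define TRACE_LINEAR_SCAN(level, code)
    #endif
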
@ -201,23 +201,24 @@ void LIR_OprDesc::validate_type() const {

#ifdef ASSERT
  if (!is_pointer() && !is_illegal()) {
    OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160
    switch (as_BasicType(type_field())) {
    case T_LONG:
      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
      assert((kindfield == cpu_register || kindfield == stack_value) &&
             size_field() == double_size, "must match");
      break;
    case T_FLOAT:
      // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
      assert((kind_field() == fpu_register || kind_field() == stack_value
             ARM_ONLY(|| kind_field() == cpu_register)
             PPC_ONLY(|| kind_field() == cpu_register) ) &&
      assert((kindfield == fpu_register || kindfield == stack_value
             ARM_ONLY(|| kindfield == cpu_register)
             PPC_ONLY(|| kindfield == cpu_register) ) &&
             size_field() == single_size, "must match");
      break;
    case T_DOUBLE:
      // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
      assert((kind_field() == fpu_register || kind_field() == stack_value
             ARM_ONLY(|| kind_field() == cpu_register)
             PPC_ONLY(|| kind_field() == cpu_register) ) &&
      assert((kindfield == fpu_register || kindfield == stack_value
             ARM_ONLY(|| kindfield == cpu_register)
             PPC_ONLY(|| kindfield == cpu_register) ) &&
             size_field() == double_size, "must match");
      break;
    case T_BOOLEAN:
@ -229,7 +230,7 @@ void LIR_OprDesc::validate_type() const {
    case T_OBJECT:
    case T_METADATA:
    case T_ARRAY:
      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
      assert((kindfield == cpu_register || kindfield == stack_value) &&
             size_field() == single_size, "must match");
      break;
@ -96,7 +96,7 @@
    CLEAR_PENDING_EXCEPTION;           \
    return (result);                   \
  }                                    \
  (0
  (void)(0

#define KILL_COMPILE_ON_ANY            \
  THREAD);                             \
@ -104,7 +104,7 @@
    fatal("unhandled ci exception");   \
    CLEAR_PENDING_EXCEPTION;           \
  }                                    \
  (0
  (void)(0


inline const char* bool_to_str(bool b) {
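
These CHECK-style macros are written to appear as the final "argument" of a call, so the body deliberately ends with an unbalanced (0 that the call site's closing parenthesis and semicolon complete; casting the leftover 0 to void keeps it from tripping the newly enabled -Wunused-value warning. A toy standalone reduction of the mechanism:

    #include <stdio.h>

    static int g_err = 0;
    static void work(int x, int* err) { if (x < 0) *err = 1; }

    // Toy macro in the same style: the call site's ")" and ";" complete
    // the trailing "(void)(0" so the expansion is a well-formed statement.
    #define CHECK_ERR \
      &g_err); if (g_err != 0) return -1; (void)(0

    static int demo(int x) {
      work(x, CHECK_ERR);  // expands to: work(x, &g_err); if (...) return -1; (void)(0);
      return x * 2;
    }

    int main() {
      printf("%d\n", demo(21));   // prints 42
      printf("%d\n", demo(-1));   // prints -1
      return 0;
    }
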
@ -124,7 +124,7 @@ class DescriptorStream : public ResourceObj {
      fatal(STREAM->parse_error());    \
    }                                  \
    return NULL;                       \
  } 0
  } (void)0

#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
@ -133,7 +133,7 @@ class DescriptorStream : public ResourceObj {
#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()

#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0

#ifndef PRODUCT
void Identifier::print_on(outputStream* str) const {
@ -2825,6 +2825,7 @@ void java_lang_invoke_CallSite::compute_offsets() {
int java_security_AccessControlContext::_context_offset = 0;
int java_security_AccessControlContext::_privilegedContext_offset = 0;
int java_security_AccessControlContext::_isPrivileged_offset = 0;
int java_security_AccessControlContext::_isAuthorized_offset = -1;

void java_security_AccessControlContext::compute_offsets() {
  assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
@ -2845,9 +2846,20 @@ void java_security_AccessControlContext::compute_offsets() {
    fatal("Invalid layout of java.security.AccessControlContext");
  }
  _isPrivileged_offset = fd.offset();

  // The offset may not be present for bootstrapping with older JDK.
  if (ik->find_local_field(vmSymbols::isAuthorized_name(), vmSymbols::bool_signature(), &fd)) {
    _isAuthorized_offset = fd.offset();
  }
}


bool java_security_AccessControlContext::is_authorized(Handle context) {
  assert(context.not_null() && context->klass() == SystemDictionary::AccessControlContext_klass(), "Invalid type");
  assert(_isAuthorized_offset != -1, "should be set");
  return context->bool_field(_isAuthorized_offset) != 0;
}

oop java_security_AccessControlContext::create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS) {
  assert(_isPrivileged_offset != 0, "offsets should have been initialized");
  // Ensure klass is initialized
@ -2858,6 +2870,10 @@ oop java_security_AccessControlContext::create(objArrayHandle context, bool isPr
  result->obj_field_put(_context_offset, context());
  result->obj_field_put(_privilegedContext_offset, privileged_context());
  result->bool_field_put(_isPrivileged_offset, isPrivileged);
  // whitelist AccessControlContexts created by the JVM if present
  if (_isAuthorized_offset != -1) {
    result->bool_field_put(_isAuthorized_offset, true);
  }
  return result;
}

@ -2967,6 +2983,15 @@ int java_lang_System::err_offset_in_bytes() {
}


bool java_lang_System::has_security_manager() {
  InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::System_klass());
  address addr = ik->static_field_addr(static_security_offset);
  if (UseCompressedOops) {
    return oopDesc::load_decode_heap_oop((narrowOop *)addr) != NULL;
  } else {
    return oopDesc::load_decode_heap_oop((oop*)addr) != NULL;
  }
}

int java_lang_Class::_klass_offset;
int java_lang_Class::_array_klass_offset;
@ -3030,6 +3055,7 @@ int java_lang_ClassLoader::parent_offset;
int java_lang_System::static_in_offset;
int java_lang_System::static_out_offset;
int java_lang_System::static_err_offset;
int java_lang_System::static_security_offset;
int java_lang_StackTraceElement::declaringClass_offset;
int java_lang_StackTraceElement::methodName_offset;
int java_lang_StackTraceElement::fileName_offset;
@ -3155,6 +3181,7 @@ void JavaClasses::compute_hard_coded_offsets() {
  java_lang_System::static_in_offset = java_lang_System::hc_static_in_offset * x;
  java_lang_System::static_out_offset = java_lang_System::hc_static_out_offset * x;
  java_lang_System::static_err_offset = java_lang_System::hc_static_err_offset * x;
  java_lang_System::static_security_offset = java_lang_System::hc_static_security_offset * x;

  // java_lang_StackTraceElement
  java_lang_StackTraceElement::declaringClass_offset = java_lang_StackTraceElement::hc_declaringClass_offset * x + header;
@ -3354,6 +3381,7 @@ void JavaClasses::check_offsets() {
  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, in, "Ljava/io/InputStream;");
  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, out, "Ljava/io/PrintStream;");
  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, err, "Ljava/io/PrintStream;");
  CHECK_STATIC_OFFSET("java/lang/System", java_lang_System, security, "Ljava/lang/SecurityManager;");

  // java.lang.StackTraceElement
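
Taken together, the new fields let the VM whitelist its own contexts: an AccessControlContext created by the JVM is marked authorized up front, and later privilege checks can consult the flag instead of re-walking the context — but only when a security manager is installed at all. A plausible guard built from the new accessors (hypothetical; the actual call sites are the privileged-action entry points in the VM):

    // Hypothetical fast-path check using the accessors added above:
    if (java_lang_System::has_security_manager() &&
        !java_security_AccessControlContext::is_authorized(context)) {
      // fall back to the fully checked slow path
    }
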
@ -1167,11 +1167,14 @@ class java_security_AccessControlContext: AllStatic {
  static int _context_offset;
  static int _privilegedContext_offset;
  static int _isPrivileged_offset;
  static int _isAuthorized_offset;

  static void compute_offsets();
 public:
  static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);

  static bool is_authorized(Handle context);

  // Debugging/initialization
  friend class JavaClasses;
};
@ -1231,18 +1234,22 @@ class java_lang_System : AllStatic {
  enum {
    hc_static_in_offset = 0,
    hc_static_out_offset = 1,
    hc_static_err_offset = 2
    hc_static_err_offset = 2,
    hc_static_security_offset = 3
  };

  static int static_in_offset;
  static int static_out_offset;
  static int static_err_offset;
  static int static_security_offset;

 public:
  static int in_offset_in_bytes();
  static int out_offset_in_bytes();
  static int err_offset_in_bytes();

  static bool has_security_manager();

  // Debugging
  friend class JavaClasses;
};
@ -598,6 +598,8 @@ StringTable* StringTable::_the_table = NULL;

bool StringTable::_needs_rehashing = false;

volatile int StringTable::_parallel_claimed_idx = 0;

// Pick hashing algorithm
unsigned int StringTable::hash_string(const jchar* s, int len) {
  return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
@ -761,8 +763,18 @@ void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f)
  }
}

void StringTable::oops_do(OopClosure* f) {
  for (int i = 0; i < the_table()->table_size(); ++i) {
void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
  const int limit = the_table()->table_size();

  assert(0 <= start_idx && start_idx <= limit,
         err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
  assert(0 <= end_idx && end_idx <= limit,
         err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
  assert(start_idx <= end_idx,
         err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
                 start_idx, end_idx));

  for (int i = start_idx; i < end_idx; i += 1) {
    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
    while (entry != NULL) {
      assert(!entry->is_shared(), "CDS not used for the StringTable");
@ -774,6 +786,27 @@ void StringTable::oops_do(OopClosure* f) {
  }
}

void StringTable::oops_do(OopClosure* f) {
  buckets_do(f, 0, the_table()->table_size());
}

void StringTable::possibly_parallel_oops_do(OopClosure* f) {
  const int ClaimChunkSize = 32;
  const int limit = the_table()->table_size();

  for (;;) {
    // Grab next set of buckets to scan
    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
    if (start_idx >= limit) {
      // End of table
      break;
    }

    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
    buckets_do(f, start_idx, end_idx);
  }
}

void StringTable::verify() {
  for (int i = 0; i < the_table()->table_size(); ++i) {
    HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
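
The claim protocol lets any number of GC worker threads share one scan of the table: each Atomic::add hands the calling thread a disjoint [start_idx, end_idx) window of buckets, and a new scan cycle just resets the claim index first. The intended call shape (a sketch; in the VM the reset happens once per parallel strong-roots phase):

    // Once, before the worker threads start a scanning phase:
    StringTable::clear_parallel_claimed_index();

    // In each GC worker thread, with that worker's closure:
    StringTable::possibly_parallel_oops_do(&worker_oop_closure);
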
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -246,12 +246,19 @@ private:
  // Set if one bucket is out of balance due to hash algorithm deficiency
  static bool _needs_rehashing;

  // Claimed high water mark for parallel chunked scanning
  static volatile int _parallel_claimed_idx;

  static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
  oop basic_add(int index, Handle string_or_null, jchar* name, int len,
                unsigned int hashValue, TRAPS);

  oop lookup(int index, jchar* chars, int length, unsigned int hashValue);

  // Apply the given oop closure to the entries in the buckets
  // in the range [start_idx, end_idx).
  static void buckets_do(OopClosure* f, int start_idx, int end_idx);

  StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
                                           sizeof (HashtableEntry<oop, mtSymbol>)) {}

@ -277,9 +284,12 @@ public:
    unlink_or_oops_do(cl, NULL);
  }

  // Invoke "f->do_oop" on the locations of all oops in the table.
  // Serially invoke "f->do_oop" on the locations of all oops in the table.
  static void oops_do(OopClosure* f);

  // Possibly parallel version of the above
  static void possibly_parallel_oops_do(OopClosure* f);

  // Hashing algorithm, used as the hash value used by the
  // StringTable for bucket selection and comparison (stored in the
  // HashtableEntry structures). This is used in the String.intern() method.
@ -315,5 +325,8 @@ public:
  // Rehash the symbol table if it gets out of balance
  static void rehash_table();
  static bool needs_rehashing() { return _needs_rehashing; }

  // Parallel chunked scanning
  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
};
#endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
@ -86,9 +86,9 @@ class StackMapTable;
// These macros are used similarly to CHECK macros but also check
// the status of the verifier and return if that has an error.
#define CHECK_VERIFY(verifier) \
  CHECK); if ((verifier)->has_error()) return; (0
  CHECK); if ((verifier)->has_error()) return; ((void)0
#define CHECK_VERIFY_(verifier, result) \
  CHECK_(result)); if ((verifier)->has_error()) return (result); (0
  CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0

class TypeOrigin VALUE_OBJ_CLASS_SPEC {
 private:
@ -94,6 +94,7 @@
  template(java_lang_SecurityManager,              "java/lang/SecurityManager")           \
  template(java_security_AccessControlContext,     "java/security/AccessControlContext")  \
  template(java_security_ProtectionDomain,         "java/security/ProtectionDomain")      \
  template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext")   \
  template(java_io_OutputStream,                   "java/io/OutputStream")                \
  template(java_io_Reader,                         "java/io/Reader")                      \
  template(java_io_BufferedReader,                 "java/io/BufferedReader")              \
@ -346,6 +347,7 @@
  template(contextClassLoader_name,                "contextClassLoader")                  \
  template(inheritedAccessControlContext_name,     "inheritedAccessControlContext")       \
  template(isPrivileged_name,                      "isPrivileged")                        \
  template(isAuthorized_name,                      "isAuthorized")                        \
  template(getClassContext_name,                   "getClassContext")                     \
  template(wait_name,                              "wait")                                \
  template(checkPackageAccess_name,                "checkPackageAccess")                  \
@ -989,7 +989,7 @@ Klass* ClassHierarchyWalker::find_witness_in(KlassDepChange& changes,
  assert(changes.involves_context(context_type), "irrelevant dependency");
  Klass* new_type = changes.new_type();

  count_find_witness_calls();
  (void)count_find_witness_calls();
  NOT_PRODUCT(deps_find_witness_singles++);

  // Current thread must be in VM (not native mode, as in CI):
@ -2615,7 +2615,8 @@ void nmethod::print_relocations() {
                  relocation_begin()-1+ip[1]);
    for (; ip < index_end; ip++)
      tty->print_cr("  (%d ?)", ip[0]);
    tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
    tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
    ip++;
    tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
  }
}
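
The nmethod fix separates the increment from the print because the call also passes ip itself: C++ (before C++17's stricter sequencing) leaves the evaluation order of function arguments unspecified, so with *ip++ in the same argument list the other argument may observe ip before or after the increment. A standalone reduction of the hazard:

    #include <cstdio>

    int main() {
      int data[2] = {10, 20};
      int* ip = data;
      // Hazardous shape: whether the first argument sees the pre- or
      // post-increment value of ip is unspecified.
      //   std::printf("%p: %d\n", (void*)ip, *ip++);

      // Fixed shape: sequence the increment explicitly.
      std::printf("%p: %d\n", (void*)ip, *ip);
      ip++;
      return 0;
    }
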
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -565,11 +565,9 @@ bool CardTableExtension::resize_commit_uncommit(int changed_region,
    if(new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size())) {
        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                              "card table expansion");
      }
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
@@ -1250,14 +1250,13 @@ uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
avg_promoted()->deviation());
}

gclog_or_tty->print( " avg_promoted_padded_avg: %f"
gclog_or_tty->print_cr( " avg_promoted_padded_avg: %f"
" avg_pretenured_padded_avg: %f"
" tenuring_thresh: %d"
" target_size: " SIZE_FORMAT,
avg_promoted()->padded_average(),
_avg_pretenured->padded_average(),
tenuring_threshold, target_size);
tty->cr();
}

set_survivor_size(target_size);
@@ -1279,7 +1278,7 @@ void PSAdaptiveSizePolicy::update_averages(bool is_survivor_overflow,
avg_promoted()->sample(promoted + _avg_pretenured->padded_average());

if (PrintAdaptiveSizePolicy) {
gclog_or_tty->print(
gclog_or_tty->print_cr(
"AdaptiveSizePolicy::update_averages:"
" survived: " SIZE_FORMAT
" promoted: " SIZE_FORMAT
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,8 @@ bool PSVirtualSpace::expand_by(size_t bytes) {
}

char* const base_addr = committed_high_addr();
bool result = special() || os::commit_memory(base_addr, bytes, alignment());
bool result = special() ||
os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_high_addr += bytes;
}

@@ -154,7 +155,7 @@ PSVirtualSpace::expand_into(PSVirtualSpace* other_space, size_t bytes) {
if (tmp_bytes > 0) {
char* const commit_base = committed_high_addr();
if (other_space->special() ||
os::commit_memory(commit_base, tmp_bytes, alignment())) {
os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
other_space->reserved_high_addr(),

@@ -269,7 +270,8 @@ bool PSVirtualSpaceHighToLow::expand_by(size_t bytes) {
}

char* const base_addr = committed_low_addr() - bytes;
bool result = special() || os::commit_memory(base_addr, bytes, alignment());
bool result = special() ||
os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
if (result) {
_committed_low_addr -= bytes;
}

@@ -322,7 +324,7 @@ size_t PSVirtualSpaceHighToLow::expand_into(PSVirtualSpace* other_space,
if (tmp_bytes > 0) {
char* const commit_base = committed_low_addr() - tmp_bytes;
if (other_space->special() ||
os::commit_memory(commit_base, tmp_bytes, alignment())) {
os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
// Reduce the reserved region in the other space.
other_space->set_reserved(other_space->reserved_low_addr(),
other_space->reserved_high_addr() - tmp_bytes,
@@ -263,7 +263,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

// Allocate a new chunk from the pool (might expand the pool)
_NOINLINE_ void* allocate(size_t bytes) {
_NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
assert(bytes == _size, "bad size");
void* p = NULL;
// No VM lock can be taken inside ThreadCritical lock, so os::malloc

@@ -273,9 +273,9 @@ class ChunkPool: public CHeapObj<mtInternal> {
p = get_first();
}
if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
if (p == NULL)
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
}
return p;
}

@@ -372,7 +372,7 @@ class ChunkPoolCleaner : public PeriodicTask {
//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
// requested_size is equal to sizeof(Chunk) but in order for the arena
// allocations to come out aligned as expected the size must be aligned
// to expected arena alignment.

@@ -380,13 +380,14 @@ void* Chunk::operator new(size_t requested_size, size_t length) {
assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
size_t bytes = ARENA_ALIGN(requested_size) + length;
switch (length) {
case Chunk::size: return ChunkPool::large_pool()->allocate(bytes);
case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes);
case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
default: {
void *p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == NULL)
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
}
return p;
}
}

@@ -440,7 +441,7 @@ NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
Arena::Arena(size_t init_size) {
size_t round_size = (sizeof (char *)) - 1;
init_size = (init_size+round_size) & ~round_size;
_first = _chunk = new (init_size) Chunk(init_size);
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
set_size_in_bytes(init_size);

@@ -448,7 +449,7 @@ Arena::Arena(size_t init_size) {
}

Arena::Arena() {
_first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
set_size_in_bytes(Chunk::init_size);

@@ -555,12 +556,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
size_t len = MAX2(x, (size_t) Chunk::size);

Chunk *k = _chunk; // Get filled-up chunk address
_chunk = new (len) Chunk(len);
_chunk = new (alloc_failmode, len) Chunk(len);

if (_chunk == NULL) {
if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
}
return NULL;
}
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
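The ChunkPool and Arena hunks above thread a failure-mode parameter through every allocation path. A hedged sketch of that two-mode pattern (simplified names, not the HotSpot API): callers choose to die on out-of-memory or to receive a NULL they must handle.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class AllocFail { EXIT_OOM, RETURN_NULL };

// Allocate, either aborting on failure or returning NULL to the caller.
void* allocate(size_t bytes, AllocFail mode) {
  void* p = std::malloc(bytes);
  if (p == nullptr && mode == AllocFail::EXIT_OOM) {
    std::fprintf(stderr, "out of memory (%zu bytes)\n", bytes);
    std::abort();
  }
  return p;  // may be NULL under RETURN_NULL
}

int main() {
  void* must = allocate(64, AllocFail::EXIT_OOM);     // never NULL on return
  void* maybe = allocate(64, AllocFail::RETURN_NULL); // check before use
  if (maybe == nullptr) std::puts("allocation failed, recovering");
  std::free(must);
  std::free(maybe);
  return 0;
}
```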
@@ -340,7 +340,7 @@ class Chunk: CHeapObj<mtChunk> {
Chunk* _next; // Next Chunk in list
const size_t _len; // Size of this Chunk
public:
void* operator new(size_t size, size_t length);
void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
void operator delete(void* p);
Chunk(size_t length);

@@ -403,10 +403,15 @@ protected:

void signal_out_of_memory(size_t request, const char* whence) const;

void check_for_overflow(size_t request, const char* whence) const {
bool check_for_overflow(size_t request, const char* whence,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
return false;
}
signal_out_of_memory(request, whence);
}
return true;
}

public:
@@ -430,7 +435,8 @@ protected:
assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
x = ARENA_ALIGN(x);
debug_only(if (UseMallocOnly) return malloc(x);)
check_for_overflow(x, "Arena::Amalloc");
if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode);

@@ -444,7 +450,8 @@ protected:
void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
debug_only(if (UseMallocOnly) return malloc(x);)
check_for_overflow(x, "Arena::Amalloc_4");
if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode);

@@ -465,7 +472,8 @@ protected:
size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
x += delta;
#endif
check_for_overflow(x, "Arena::Amalloc_D");
if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
return NULL;
NOT_PRODUCT(inc_bytes_allocated(x);)
if (_hwm + x > _max) {
return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
@@ -635,8 +643,15 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
(type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
(type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
(type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
(type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
(type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
(new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
resource_free_bytes((char*)(old), (size) * sizeof(type))

@@ -647,28 +662,40 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
#define NEW_RESOURCE_OBJ(type)\
NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
(type*) (AllocateHeap((size) * sizeof(type), memflags))
#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
FreeHeap((char*)(old), memflags)
#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
(type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
(type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
(type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))
#define NEW_C_HEAP_ARRAY(type, size, memflags)\
(type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail) \
(type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
FreeHeap((char*)(old), memflags)

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
FreeHeap((char*)objname, memflags);

@@ -713,13 +740,21 @@ public:
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator : StackObj {
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
char* _addr;
bool _use_malloc;
size_t _size;
bool _free_in_destructor;
public:
ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
~ArrayAllocator() { free(); }
ArrayAllocator(bool free_in_destructor = true) :
_addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

~ArrayAllocator() {
if (_free_in_destructor) {
free();
}
}

E* allocate(size_t length);
void free();
};
@@ -146,10 +146,7 @@ E* ArrayAllocator<E, F>::allocate(size_t length) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
}

bool success = os::commit_memory(_addr, _size, false /* executable */);
if (!success) {
vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
}
os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");

return (E*)_addr;
}
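Several hunks in this changeset replace a repeated commit-then-check-then-exit sequence with a single `commit_memory_or_exit()` call. A hedged sketch of that consolidation, with stand-in signatures rather than the real `os` API:

```cpp
#include <cstdio>
#include <cstdlib>

// Stand-in for the platform commit call (assumed, simplified).
bool commit_memory(char* addr, size_t bytes, bool executable) {
  (void)bytes; (void)executable;
  return addr != nullptr;  // pretend success for the sketch
}

void vm_exit_out_of_memory(size_t bytes, const char* mesg) {
  std::fprintf(stderr, "OOM: %s (%zu bytes)\n", mesg, bytes);
  std::exit(1);
}

// One wrapper replaces the commit/check/exit boilerplate at every call
// site, so callers state the failure message in a single line.
void commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                           const char* mesg) {
  if (!commit_memory(addr, bytes, executable)) {
    vm_exit_out_of_memory(bytes, mesg);
  }
}

int main() {
  static char region[4096];
  commit_memory_or_exit(region, sizeof region, false, "card table expansion");
  std::puts("committed");
  return 0;
}
```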
@@ -110,11 +110,8 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
jbyte* guard_card = &_byte_map[_guard_index];
uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
_guard_region = MemRegion((HeapWord*)guard_page, _page_size);
if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
// Do better than this for Merlin
vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
}

os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
!ExecMem, "card table last card");
*guard_card = last_card;

_lowest_non_clean =

@@ -312,12 +309,9 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
MemRegion(cur_committed.end(), new_end_for_commit);

assert(!new_committed.is_empty(), "Region should not be empty here");
if (!os::commit_memory((char*)new_committed.start(),
new_committed.byte_size(), _page_size)) {
// Do better than this for Merlin
vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
"card table expansion");
}
os::commit_memory_or_exit((char*)new_committed.start(),
new_committed.byte_size(), _page_size,
!ExecMem, "card table expansion");
// Use new_end_aligned (as opposed to new_end_for_commit) because
// the cur_committed region may include the guard region.
} else if (new_end_aligned < cur_committed.end()) {

@@ -418,7 +412,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
}
// Touch the last card of the covered region to show that it
// is committed (or SEGV).
debug_only(*byte_for(_covered[ind].last());)
debug_only((void) (*byte_for(_covered[ind].last()));)
debug_only(verify_guard();)
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -47,7 +47,6 @@ enum SH_process_strong_roots_tasks {
SH_PS_SystemDictionary_oops_do,
SH_PS_ClassLoaderDataGraph_oops_do,
SH_PS_jvmti_oops_do,
SH_PS_StringTable_oops_do,
SH_PS_CodeCache_oops_do,
// Leave this one last.
SH_PS_NumElements

@@ -127,6 +126,8 @@ SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
{
if (_active) {
outer->change_strong_roots_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
}

@@ -154,14 +155,16 @@ void SharedHeap::process_strong_roots(bool activate_scope,
// Global (strong) JNI handles
if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
JNIHandles::oops_do(roots);

// All threads execute this; the individual threads are task groups.
CLDToOopClosure roots_from_clds(roots);
CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
if (ParallelGCThreads > 0) {
Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots);
if (CollectedHeap::use_parallel_gc_threads()) {
Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
} else {
Threads::oops_do(roots, roots_from_clds_p, code_roots);
}

if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
ObjectSynchronizer::oops_do(roots);
if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))

@@ -189,8 +192,12 @@ void SharedHeap::process_strong_roots(bool activate_scope,
}
}

if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
if (so & SO_Strings) {
// All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks.
if (so & SO_Strings) {
if (CollectedHeap::use_parallel_gc_threads()) {
StringTable::possibly_parallel_oops_do(roots);
} else {
StringTable::oops_do(roots);
}
}
@@ -108,6 +108,7 @@ oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
LatestMethodOopCache* Universe::_pd_implies_cache = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_perm_gen = NULL;

@@ -224,6 +225,7 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {
_finalizer_register_cache->serialize(f);
_loader_addClass_cache->serialize(f);
_reflect_invoke_cache->serialize(f);
_pd_implies_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {

@@ -529,7 +531,9 @@ void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
if (vt) vt->initialize_vtable(false, CHECK);
if (ko->oop_is_instance()) {
InstanceKlass* ik = (InstanceKlass*)ko;
for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
for (KlassHandle s_h(THREAD, ik->subklass());
s_h() != NULL;
s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
reinitialize_vtable_of(s_h, CHECK);
}
}

@@ -645,6 +649,7 @@ jint universe_init() {
// Metaspace::initialize_shared_spaces() tries to populate them.
Universe::_finalizer_register_cache = new LatestMethodOopCache();
Universe::_loader_addClass_cache = new LatestMethodOopCache();
Universe::_pd_implies_cache = new LatestMethodOopCache();
Universe::_reflect_invoke_cache = new ActiveMethodOopsCache();

if (UseSharedSpaces) {

@@ -1108,6 +1113,23 @@ bool universe_post_init() {
Universe::_loader_addClass_cache->init(
SystemDictionary::ClassLoader_klass(), m, CHECK_false);

// Setup method for checking protection domain
InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
find_method(vmSymbols::impliesCreateAccessControlContext_name(),
vmSymbols::void_boolean_signature());
// Allow NULL which should only happen with bootstrapping.
if (m != NULL) {
if (m->is_static()) {
// NoSuchMethodException doesn't actually work because it tries to run the
// <init> function before java_lang_Class is linked. Print error and exit.
tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
return false; // initialization failed
}
Universe::_pd_implies_cache->init(
SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);
}

// The following is initializing converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
// a better solution for this as well.

@@ -1525,6 +1547,7 @@ bool ActiveMethodOopsCache::is_same_method(const Method* method) const {


Method* LatestMethodOopCache::get_Method() {
if (klass() == NULL) return NULL;
InstanceKlass* ik = InstanceKlass::cast(klass());
Method* m = ik->method_with_idnum(method_idnum());
assert(m != NULL, "sanity check");
@@ -176,6 +176,7 @@ class Universe: AllStatic {
static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string
static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
static LatestMethodOopCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
static LatestMethodOopCache* _pd_implies_cache; // method for checking protection domain attributes
static ActiveMethodOopsCache* _reflect_invoke_cache; // method for security checks
static oop _out_of_memory_error_java_heap; // preallocated error object (no backtrace)
static oop _out_of_memory_error_perm_gen; // preallocated error object (no backtrace)

@@ -333,7 +334,10 @@ class Universe: AllStatic {
static oop the_min_jint_string() { return _the_min_jint_string; }
static Method* finalizer_register_method() { return _finalizer_register_cache->get_Method(); }
static Method* loader_addClass_method() { return _loader_addClass_cache->get_Method(); }

static Method* protection_domain_implies_method() { return _pd_implies_cache->get_Method(); }
static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }

static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; }
static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
static oop virtual_machine_error_instance() { return _virtual_machine_error_instance; }
@@ -642,11 +642,21 @@ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) {
// CellType handling methods
//

// Allocate memory and throw LinkageError if failure.
#define ALLOC_RESOURCE_ARRAY(var, type, count) \
var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \
if (var == NULL) { \
report_error("Cannot reserve enough memory to analyze this method"); \
return; \
}


void GenerateOopMap::init_state() {
_state_len = _max_locals + _max_stack + _max_monitors;
_state = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
memset(_state, 0, _state_len * sizeof(CellTypeState));
_state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count);
}

void GenerateOopMap::make_context_uninitialized() {

@@ -905,7 +915,7 @@ void GenerateOopMap::init_basic_blocks() {
// But cumbersome since we don't know the stack heights yet. (Nor the
// monitor stack heights...)

_basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);

// Make a pass through the bytecodes. Count the number of monitorenters.
// This can be used as an upper bound on the monitor stack depth in programs

@@ -976,8 +986,8 @@ void GenerateOopMap::init_basic_blocks() {
return;
}

CellTypeState *basicBlockState =
NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
CellTypeState *basicBlockState;
ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));

// Make a pass over the basicblocks and assign their state vectors.
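The `ALLOC_RESOURCE_ARRAY` macro above centralizes a recoverable allocate-and-bail pattern: allocate with the RETURN_NULL strategy, and on failure report an error and return from the enclosing void function. A toy version of the same shape, with hypothetical names in place of the resource-area API:

```cpp
#include <cstdio>
#include <cstdlib>

static void report_error(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

// Expands inside a void function; on failure it reports and returns,
// so code after the macro never sees a NULL pointer.
#define ALLOC_OR_RETURN(var, type, count)                                 \
  var = static_cast<type*>(std::calloc((count), sizeof(type)));          \
  if (var == NULL) {                                                     \
    report_error("Cannot reserve enough memory to analyze this method"); \
    return;                                                              \
  }

struct State { int* cells; };

void init_state(State* s, size_t n) {
  ALLOC_OR_RETURN(s->cells, int, n);
  s->cells[0] = 1;  // safe: the macro returned early on failure
}

int main() {
  State s = {nullptr};
  init_state(&s, 16);
  std::printf("first cell: %d\n", s.cells ? s.cells[0] : -1);
  std::free(s.cells);
  return 0;
}
```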
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -406,10 +406,10 @@
develop(intx, WarmCallMaxSize, 999999, \
"size of the largest inlinable method") \
\
product(intx, MaxNodeLimit, 65000, \
product(intx, MaxNodeLimit, 80000, \
"Maximum number of nodes") \
\
product(intx, NodeLimitFudgeFactor, 1000, \
product(intx, NodeLimitFudgeFactor, 2000, \
"Fudge Factor for certain optimizations") \
\
product(bool, UseJumpTables, true, \
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -435,6 +435,9 @@ void PhaseChaitin::Register_Allocate() {
// Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
// not match the Phi itself, insert a copy.
coalesce.insert_copies(_matcher);
if (C->failing()) {
return;
}
}

// After aggressive coalesce, attempt a first cut at coloring.
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -240,6 +240,8 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
_unique = C->unique();

for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
if (C->failing()) return;
Block *b = _phc._cfg._blocks[i];
uint cnt = b->num_preds(); // Number of inputs to the Phi

@@ -985,6 +985,8 @@ Node *Matcher::xform( Node *n, int max_stack ) {
mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root

while (mstack.is_nonempty()) {
C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
if (C->failing()) return NULL;
n = mstack.node(); // Leave node on stack
Node_State nstate = mstack.state();
if (nstate == Visit) {
@@ -2930,7 +2930,9 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this;
// Don't bother trying to transform a dead node
if (in(0) && in(0)->is_top()) return NULL;
if (in(0) && in(0)->is_top()) {
return NULL;
}

// Eliminate volatile MemBars for scalar replaced objects.
if (can_reshape && req() == (Precedent+1)) {

@@ -2939,6 +2941,14 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
// Volatile field loads and stores.
Node* my_mem = in(MemBarNode::Precedent);
// The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
assert(my_mem->unique_out() == this, "sanity");
phase->hash_delete(this);
del_req(Precedent);
phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
my_mem = NULL;
}
if (my_mem != NULL && my_mem->is_Mem()) {
const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
// Check for scalar replaced object reference.

@@ -4384,7 +4394,7 @@ static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) {
}
}
#else // !ASSERT
#define verify_memory_slice(m,i,n) (0) // PRODUCT version is no-op
#define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op
#endif

@@ -619,7 +619,7 @@ void collector_func_load(char* name,
void* null_argument_3);
#pragma weak collector_func_load
#define collector_func_load(x0,x1,x2,x3,x4,x5,x6) \
( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),0 : 0 )
( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),(void)0 : (void)0 )
#endif // __APPLE__
#endif // !_WINDOWS

@@ -1141,6 +1141,56 @@ JVM_ENTRY(void, JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protect
}
JVM_END

static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
// If there is a security manager and protection domain, check the access
// in the protection domain, otherwise it is authorized.
if (java_lang_System::has_security_manager()) {

// For bootstrapping, if pd implies method isn't in the JDK, allow
// this context to revert to older behavior.
// In this case the isAuthorized field in AccessControlContext is also not
// present.
if (Universe::protection_domain_implies_method() == NULL) {
return true;
}

// Whitelist certain access control contexts
if (java_security_AccessControlContext::is_authorized(context)) {
return true;
}

oop prot = klass->protection_domain();
if (prot != NULL) {
// Call pd.implies(new SecurityPermission("createAccessControlContext"))
// in the new wrapper.
methodHandle m(THREAD, Universe::protection_domain_implies_method());
Handle h_prot(THREAD, prot);
JavaValue result(T_BOOLEAN);
JavaCallArguments args(h_prot);
JavaCalls::call(&result, m, &args, CHECK_false);
return (result.get_jboolean() != 0);
}
}
return true;
}

// Create an AccessControlContext with a protection domain with null codesource
// and null permissions - which gives no permissions.
oop create_dummy_access_control_context(TRAPS) {
InstanceKlass* pd_klass = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass());
// new ProtectionDomain(null,null);
oop null_protection_domain = pd_klass->allocate_instance(CHECK_NULL);
Handle null_pd(THREAD, null_protection_domain);

// new ProtectionDomain[] {pd};
objArrayOop context = oopFactory::new_objArray(pd_klass, 1, CHECK_NULL);
context->obj_at_put(0, null_pd());

// new AccessControlContext(new ProtectionDomain[] {pd})
objArrayHandle h_context(THREAD, context);
oop result = java_security_AccessControlContext::create(h_context, false, Handle(), CHECK_NULL);
return result;
}

JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, jobject context, jboolean wrapException))
JVMWrapper("JVM_DoPrivileged");

@@ -1149,8 +1199,29 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
THROW_MSG_0(vmSymbols::java_lang_NullPointerException(), "Null action");
}

// Stack allocated list of privileged stack elements
PrivilegedElement pi;
// Compute the frame initiating the do privileged operation and setup the privileged stack
vframeStream vfst(thread);
vfst.security_get_caller_frame(1);

if (vfst.at_end()) {
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "no caller?");
}

Method* method = vfst.method();
instanceKlassHandle klass (THREAD, method->method_holder());

// Check that action object understands "Object run()"
Handle h_context;
if (context != NULL) {
h_context = Handle(THREAD, JNIHandles::resolve(context));
bool authorized = is_authorized(h_context, klass, CHECK_NULL);
if (!authorized) {
// Create an unprivileged access control object and call its run function
// instead.
oop noprivs = create_dummy_access_control_context(CHECK_NULL);
h_context = Handle(THREAD, noprivs);
}
}

// Check that action object understands "Object run()"
Handle object (THREAD, JNIHandles::resolve(action));

@@ -1164,12 +1235,10 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "No run method");
}

// Compute the frame initiating the do privileged operation and setup the privileged stack
vframeStream vfst(thread);
vfst.security_get_caller_frame(1);

// Stack allocated list of privileged stack elements
PrivilegedElement pi;
if (!vfst.at_end()) {
pi.initialize(&vfst, JNIHandles::resolve(context), thread->privileged_stack_top(), CHECK_NULL);
pi.initialize(&vfst, h_context(), thread->privileged_stack_top(), CHECK_NULL);
thread->set_privileged_stack_top(&pi);
}

@@ -3241,24 +3310,10 @@ JVM_ENTRY(jobject, JVM_CurrentClassLoader(JNIEnv *env))
JVM_END


// Utility object for collecting method holders walking down the stack
class KlassLink: public ResourceObj {
public:
KlassHandle klass;
KlassLink* next;

KlassLink(KlassHandle k) { klass = k; next = NULL; }
};


JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
JVMWrapper("JVM_GetClassContext");
ResourceMark rm(THREAD);
JvmtiVMObjectAllocEventCollector oam;
// Collect linked list of (handles to) method holders
KlassLink* first = NULL;
KlassLink* last = NULL;
int depth = 0;
vframeStream vfst(thread);

if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {

@@ -3272,32 +3327,23 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
}

// Collect method holders
GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>();
for (; !vfst.at_end(); vfst.security_next()) {
Method* m = vfst.method();
// Native frames are not returned
if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
Klass* holder = m->method_holder();
assert(holder->is_klass(), "just checking");
depth++;
KlassLink* l = new KlassLink(KlassHandle(thread, holder));
if (first == NULL) {
first = last = l;
} else {
last->next = l;
last = l;
}
klass_array->append(holder);
}
}

// Create result array of type [Ljava/lang/Class;
objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL);
// Fill in mirrors corresponding to method holders
int index = 0;
while (first != NULL) {
result->obj_at_put(index++, first->klass()->java_mirror());
first = first->next;
for (int i = 0; i < klass_array->length(); i++) {
result->obj_at_put(i, klass_array->at(i)->java_mirror());
}
assert(index == depth, "just checking");

return (jobjectArray) JNIHandles::make_local(env, result);
JVM_END
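The `JVM_GetClassContext` rewrite above drops the hand-rolled `KlassLink` list and its separate `depth` counter in favor of a growable array. A rough analogue with `std::vector` standing in for HotSpot's `GrowableArray` (illustrative only, not the VM code):

```cpp
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Was: KlassLink* first/last plus a depth counter kept in sync by hand.
  std::vector<std::string> holders;
  for (const char* frame : {"ClassA", "ClassB", "ClassC"}) {
    holders.push_back(frame);  // one append per non-ignored stack frame
  }

  // The result is sized directly from the collection; filling it is a
  // plain indexed loop instead of walking and re-counting a linked list.
  std::vector<std::string> mirrors(holders.size());
  for (size_t i = 0; i < holders.size(); i++) {
    mirrors[i] = holders[i];
  }
  std::printf("%zu frames collected\n", mirrors.size());
  return 0;
}
```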
@@ -1897,7 +1897,7 @@ jvmtiEnv *jvmti;
</description>
</param>
<param id="monitor_info_ptr">
<allocbuf outcount="owned_monitor_depth_count_ptr">
<allocbuf outcount="monitor_info_count_ptr">
<struct>jvmtiMonitorStackDepthInfo</struct>
</allocbuf>
<description>
@@ -159,7 +159,7 @@ WB_END


WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
os::commit_memory((char *)(uintptr_t)addr, size);
os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
WB_END

@@ -1566,6 +1566,15 @@ julong Arguments::limit_by_allocatable_memory(julong limit) {
return result;
}

void Arguments::set_heap_base_min_address() {
if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
// By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
// G1 currently needs a lot of C-heap, so on Solaris we have to give G1
// some extra space for the C-heap compared to other collectors.
FLAG_SET_ERGO(uintx, HeapBaseMinAddress, 1*G);
}
}

void Arguments::set_heap_size() {
if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
// Deprecated flag

@@ -1885,21 +1894,6 @@ bool Arguments::check_vm_args_consistency() {
// Note: Needs platform-dependent factoring.
bool status = true;

#if ( (defined(COMPILER2) && defined(SPARC)))
// NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
// on sparc doesn't require generation of a stub as is the case on, e.g.,
// x86. Normally, VM_Version_init must be called from init_globals in
// init.cpp, which is called by the initial java thread *after* arguments
// have been parsed. VM_Version_init gets called twice on sparc.
extern void VM_Version_init();
VM_Version_init();
if (!VM_Version::has_v9()) {
jio_fprintf(defaultStream::error_stream(),
"V8 Machine detected, Server requires V9\n");
status = false;
}
#endif /* COMPILER2 && SPARC */

// Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
// builds so the cost of stack banging can be measured.
#if (defined(PRODUCT) && defined(SOLARIS))

@@ -3525,6 +3519,8 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
}

set_heap_base_min_address();

// Set heap size based on available physical memory
set_heap_size();

@@ -315,6 +315,8 @@ class Arguments : AllStatic {
// limits the given memory size by the maximum amount of memory this process is
// currently allowed to allocate or reserve.
static julong limit_by_allocatable_memory(julong size);
// Setup HeapBaseMinAddress
static void set_heap_base_min_address();
// Setup heap size
static void set_heap_size();
// Based on automatic selection criteria, should the
@@ -647,10 +647,13 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
void* ptr = ::realloc(memblock, size);
if (ptr != NULL) {
MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
tkr.record((address)memblock, (address)ptr, size, memflags,
caller == 0 ? CALLER_PC : caller);
} else {
tkr.discard();
}
return ptr;
#else

@@ -1456,7 +1459,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
}

return result;

@@ -1466,7 +1469,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
MEMFLAGS flags) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_type((address)result, flags);
}

@@ -1476,7 +1479,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
char* result = pd_attempt_reserve_memory_at(bytes, addr);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
}
return result;
}

@@ -1503,18 +1506,36 @@ bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
return res;
}

void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
const char* mesg) {
pd_commit_memory_or_exit(addr, bytes, executable, mesg);
MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}

void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
bool executable, const char* mesg) {
os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}

bool os::uncommit_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
bool res = pd_uncommit_memory(addr, bytes);
if (res) {
MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
tkr.record((address)addr, bytes);
} else {
tkr.discard();
}
return res;
}

bool os::release_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res = pd_release_memory(addr, bytes);
if (res) {
MemTracker::record_virtual_memory_release((address)addr, bytes);
tkr.record((address)addr, bytes);
} else {
tkr.discard();
}
return res;
}

@@ -1525,8 +1546,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
bool allow_exec) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
}
return result;
}

@@ -1539,10 +1559,12 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
}

bool os::unmap_memory(char *addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool result = pd_unmap_memory(addr, bytes);
if (result) {
MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
MemTracker::record_virtual_memory_release((address)addr, bytes);
tkr.record((address)addr, bytes);
} else {
tkr.discard();
}
return result;
}
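The os.cpp hunks above switch from direct `MemTracker::record_*` calls to a take-a-tracker-first idiom: obtain a tracker before the OS call, then either record the completed operation or discard the pending entry if the call failed. A hedged sketch of that record-or-discard shape (simplified; not the MemTracker API):

```cpp
#include <cstdio>

class Tracker {
  bool _armed = true;
 public:
  void record(void* addr, size_t bytes) {  // success: log the transition
    _armed = false;
    std::printf("tracked release of %zu bytes at %p\n", bytes, addr);
  }
  void discard() { _armed = false; }       // failure: drop the pending entry
  ~Tracker() { if (_armed) std::puts("tracker dropped without record/discard"); }
};

bool os_release(void* addr, size_t bytes);  // platform call (stubbed below)

bool release_memory(void* addr, size_t bytes) {
  Tracker tkr;                        // obtained before the OS call
  bool res = os_release(addr, bytes);
  if (res) {
    tkr.record(addr, bytes);
  } else {
    tkr.discard();
  }
  return res;
}

bool os_release(void* addr, size_t bytes) { (void)addr; (void)bytes; return true; }

int main() {
  char buf[16];
  release_memory(buf, sizeof buf);
  return 0;
}
```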
@@ -78,6 +78,10 @@ enum ThreadPriority { // JLS 20.20.1-3
CriticalPriority = 11 // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;

// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

@@ -104,9 +108,16 @@ class os: AllStatic {
static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
static void pd_split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false);
static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool executable = false);
bool executable);
// Same as pd_commit_memory() but either succeeds or calls
// vm_exit_out_of_memory() with the specified mesg.
static void pd_commit_memory_or_exit(char* addr, size_t bytes,
bool executable, const char* mesg);
static void pd_commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
static bool pd_uncommit_memory(char* addr, size_t bytes);
static bool pd_release_memory(char* addr, size_t bytes);

@@ -261,9 +272,16 @@ class os: AllStatic {
static char* attempt_reserve_memory_at(size_t bytes, char* addr);
static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
static bool commit_memory(char* addr, size_t bytes, bool executable = false);
static bool commit_memory(char* addr, size_t bytes, bool executable);
static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
bool executable = false);
bool executable);
// Same as commit_memory() but either succeeds or calls
// vm_exit_out_of_memory() with the specified mesg.
static void commit_memory_or_exit(char* addr, size_t bytes,
bool executable, const char* mesg);
static void commit_memory_or_exit(char* addr, size_t size,
size_t alignment_hint,
bool executable, const char* mesg);
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
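A plausible reading of the os.hpp hunk above: the `executable` default argument is dropped and callers pass an explicit named constant, so call sites read as `!ExecMem` rather than a bare `false` that is easy to misplace among other boolean parameters. A minimal sketch of that style (hypothetical caller, simplified signature):

```cpp
// Named flag mirroring the one introduced above.
const bool ExecMem = true;

bool commit_memory(char* addr, size_t bytes, bool executable);  // no default now

void caller(char* addr, size_t bytes) {
  commit_memory(addr, bytes, !ExecMem);  // self-documenting at the call site
}

bool commit_memory(char* addr, size_t bytes, bool executable) {
  (void)addr; (void)bytes; (void)executable;
  return true;  // stub for the sketch
}

int main() {
  char b[8];
  caller(b, sizeof b);
  return 0;
}
```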
@@ -2731,7 +2731,7 @@ VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver,
// ResourceObject, so do not put any ResourceMarks in here.
char *s = sig->as_C_string();
int len = (int)strlen(s);
*s++; len--; // Skip opening paren
s++; len--; // Skip opening paren
char *t = s+len;
while( *(--t) != ')' ) ; // Find close paren

@@ -533,11 +533,13 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
lower_high() + lower_needs <= lower_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", lower_needs=" SIZE_FORMAT ", %d) failed",
lower_high(), lower_needs, _executable);)
return false;
} else {
_lower_high += lower_needs;
}
}
}
if (middle_needs > 0) {
assert(lower_high_boundary() <= middle_high() &&

@@ -545,7 +547,10 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
"must not expand beyond region");
if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
_executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
", %d) failed", middle_high(), middle_needs,
middle_alignment(), _executable);)
return false;
}
_middle_high += middle_needs;

@@ -555,7 +560,9 @@ bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
upper_high() + upper_needs <= upper_high_boundary(),
"must not expand beyond region");
if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
debug_only(warning("os::commit_memory failed"));
debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
", upper_needs=" SIZE_FORMAT ", %d) failed",
upper_high(), upper_needs, _executable);)
return false;
} else {
_upper_high += upper_needs;
@@ -247,7 +247,7 @@ template <> void DCmdArgument<NanoTimeArgument>::init_value(TRAPS) {
} else {
_value._time = 0;
_value._nanotime = 0;
strcmp(_value._unit, "ns");
strcpy(_value._unit, "ns");
}
}

@@ -130,7 +130,7 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
if (malloc_ptr->is_arena_record()) {
// see if arena memory record present
MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
if (next_malloc_ptr->is_arena_memory_record()) {
if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
"Arena records do not match");
size = next_malloc_ptr->size();
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -457,9 +457,8 @@ class SeqMemPointerRecord : public MemPointerRecord {
public:
SeqMemPointerRecord(): _seq(0){ }

SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size)
: MemPointerRecord(addr, flags, size) {
_seq = SequenceGenerator::next();
SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
: MemPointerRecord(addr, flags, size), _seq(seq) {
}

SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)

@@ -488,8 +487,8 @@ class SeqMemPointerRecordEx : public MemPointerRecordEx {
SeqMemPointerRecordEx(): _seq(0) { }

SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
address pc): MemPointerRecordEx(addr, flags, size, pc) {
_seq = SequenceGenerator::next();
jint seq, address pc):
MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
}

SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -69,10 +69,11 @@ MemRecorder::MemRecorder() {

if (_pointer_records != NULL) {
// record itself
address pc = CURRENT_PC;
record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
sizeof(MemRecorder), CALLER_PC);
sizeof(MemRecorder), SequenceGenerator::next(), pc);
record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
_pointer_records->instance_size(),CURRENT_PC);
_pointer_records->instance_size(), SequenceGenerator::next(), pc);
}
}

@@ -116,7 +117,8 @@ int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
}
}

bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
assert(seq > 0, "No sequence number");
#ifdef ASSERT
if (MemPointerRecord::is_virtual_memory_record(flags)) {
assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");

@@ -133,11 +135,11 @@ bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
#endif

if (MemTracker::track_callsite()) {
SeqMemPointerRecordEx ap(p, flags, size, pc);
SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
} else {
SeqMemPointerRecord ap(p, flags, size);
SeqMemPointerRecord ap(p, flags, size, seq);
debug_only(check_dup_seq(ap.seq());)
return _pointer_records->append(&ap);
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -220,7 +220,7 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
~MemRecorder();

// record a memory operation
bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0);
bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);

// linked list support
inline void set_next(MemRecorder* rec) {