Jesper Wilhelmsson 2012-12-07 07:36:51 -08:00
commit 6c64f0223c
184 changed files with 16121 additions and 16576 deletions

@@ -71,41 +71,36 @@ default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
!include $(HOTSPOTWORKSPACE)/make/hotspot_version
!if "$(HOTSPOT_RELEASE_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)"
!else
HOTSPOT_RELEASE_VERSION="$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)"
!endif
!if "$(USER_RELEASE_SUFFIX)" != ""
HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal-$(USER_RELEASE_SUFFIX)
HOTSPOT_BUILD_VERSION = internal-$(USER_RELEASE_SUFFIX)
!else
HOTSPOT_BUILD_VERSION$(HOTSPOT_BUILD_VERSION) = internal
HOTSPOT_BUILD_VERSION = internal
!endif
!if "$(HOTSPOT_BUILD_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)"
!if "$(HOTSPOT_RELEASE_VERSION)" != ""
HOTSPOT_RELEASE_VERSION="\\\"$(HOTSPOT_RELEASE_VERSION)\\\""
!else
HOTSPOT_RELEASE_VERSION="\\\"$(HS_MAJOR_VER).$(HS_MINOR_VER)-b$(HS_BUILD_NUMBER)-$(HOTSPOT_BUILD_VERSION)\\\""
!endif
!if "$(JRE_RELEASE_VERSION)" != ""
JRE_RELEASE_VERSION="$(JRE_RELEASE_VERSION)"
JRE_RELEASE_VERSION="\\\"$(JRE_RELEASE_VERSION)\\\""
!else
JRE_RELEASE_VERSION="$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)"
JRE_RELEASE_VERSION="\\\"$(JDK_MAJOR_VER).$(JDK_MINOR_VER).$(JDK_MICRO_VER)\\\""
!endif
# Define HOTSPOT_VM_DISTRO if HOTSPOT_VM_DISTRO is set,
# and if it is not see if we have the src/closed directory
!if "$(HOTSPOT_VM_DISTRO)" != ""
HOTSPOT_VM_DISTRO="$(HOTSPOT_VM_DISTRO)"
HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
!else
!if exists($(HOTSPOTWORKSPACE)\src\closed)
HOTSPOT_VM_DISTRO="Java HotSpot(TM)"
HOTSPOT_VM_DISTRO="\\\"Java HotSpot(TM)\\\""
!else
HOTSPOT_VM_DISTRO="OpenJDK"
HOTSPOT_VM_DISTRO="\\\"OpenJDK\\\""
!endif
!endif
ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) \
-define HOTSPOT_RELEASE_VERSION=\\\"$(HOTSPOT_RELEASE_VERSION)\\\" \
-define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \
-define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\"
ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -define JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) -define HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)
ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
$(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -25,33 +25,8 @@
#ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"
#include "asm/assembler.hpp"
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
jint stub_inst = *(jint*) branch;
print_instruction(stub_inst);
::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
inline int AddressLiteral::low10() const {
return Assembler::low10(value());
}
// inlines for SPARC assembler -- dmu 5/97
inline void Assembler::check_delay() {
# ifdef CHECK_DELAY
@@ -77,8 +52,7 @@ inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
inline void Assembler::add(Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rtype ); }
inline void Assembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec ); }
inline void Assembler::add(Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(bpr_op2) | wdisp16(intptr_t(d), intptr_t(pc())) | predict(p) | rs1(s1), rt); has_delay_slot(); }
inline void Assembler::bpr( RCondition c, bool a, Predict p, Register s1, Label& L) { bpr( c, a, p, s1, target(L)); }
@@ -111,16 +85,9 @@ inline void Assembler::flush( Register s1, int simm13a) { emit_data( op(arith_op
inline void Assembler::jmpl( Register s1, Register s2, Register d ) { cti(); emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { cti(); emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); has_delay_slot(); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
else ldf(w, s1, s2.as_constant(), d);
}
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
inline void Assembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) { relocate(a.rspec(offset)); ldf( w, a.base(), a.disp() + offset, d); }
inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
@@ -152,98 +119,9 @@ inline void Assembler::ldx( Register s1, int simm13a, Register d) { v9_only();
inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldd( Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void Assembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void Assembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void Assembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void Assembler::ld( Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void Assembler::ld( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
else { ld( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsb(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldsw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
else { ldub(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
else { lduh(a.base(), a.disp() + offset, d); }
}
inline void Assembler::lduw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
else { lduw(a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldd( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
else { ldd( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldx( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
else { ldx( a.base(), a.disp() + offset, d); }
}
inline void Assembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void Assembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void Assembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void Assembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void Assembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void Assembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void Assembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
if (a.has_index()) add(a.base(), a.index(), d);
else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::prefetch(Register s1, Register s2, PrefetchFcn f) { v9_only(); emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::prefetch(Register s1, int simm13a, PrefetchFcn f) { v9_only(); emit_data( op(ldst_op) | fcn(f) | op3(prefetch_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::prefetch(const Address& a, PrefetchFcn f, int offset) { v9_only(); relocate(a.rspec(offset)); prefetch(a.base(), a.disp() + offset, f); }
inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_long( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); }
inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); }
@@ -251,20 +129,9 @@ inline void Assembler::sethi( int imm22a, Register d, RelocationHolder const& rs
// pp 222
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stf(w, d, s1, s2.as_register());
else stf(w, d, s1, s2.as_constant());
}
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
else { stf(w, d, a.base(), a.disp() + offset); }
}
inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_long( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_long( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
@@ -285,46 +152,6 @@ inline void Assembler::stx( Register d, Register s1, int simm13a) { v9_only();
inline void Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_long( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::st( Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void Assembler::st( Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void Assembler::st( Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif
inline void Assembler::stb(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
else { stb(d, a.base(), a.disp() + offset); }
}
inline void Assembler::sth(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
else { sth(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stw(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
else { stw(d, a.base(), a.disp() + offset); }
}
inline void Assembler::st( Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
else { st( d, a.base(), a.disp() + offset); }
}
inline void Assembler::std(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
else { std(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stx(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
else { stx(d, a.base(), a.disp() + offset); }
}
inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
// v8 p 99
inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -336,561 +163,9 @@ inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only();
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) sub(s1, s2.as_register(), d);
else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) sub(d, offset, d);
}
// pp 231
inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::swap( Register s1, int simm13a, Register d) { v9_dep(); emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
inline void Assembler::swap( Address& a, Register d, int offset ) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d ); }
else { swap( a.base(), a.disp() + offset, d ); }
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx(a, d, offset);
#else
Assembler::ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
st_ptr(d, s1, in_bytes(simm13a));
}
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
Assembler::stx(d, a, offset);
#else
Assembler::st( d, a, offset);
#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
Assembler::ldx(a, d, offset);
#else
Assembler::ldd(a, d, offset);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
Assembler::stx(d, a, offset);
#else
Assembler::std(d, a, offset);
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
else sll_ptr(s1, s2.as_constant(), d);
}
// Use the right branch for the platform
inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
br(c, a, p, target(L));
}
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
brx(c, a, p, target(L));
}
inline void MacroAssembler::ba( Label& L ) {
br(always, false, pt, L);
}
// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::bp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::bp(c, a, cc, p, L);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
fb(c, a, p, target(L));
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::fbp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::fbp(c, a, cc, p, L);
}
inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
inline bool MacroAssembler::is_far_target(address d) {
if (ForceUnreachable) {
// References outside the code cache should be treated as far
return d < CodeCache::low_bound() || d > CodeCache::high_bound();
}
return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
// pass asserts here and in wdisp.
if ( d == NULL )
d = pc();
// Is this address within range of the call instruction?
// If not, use the expensive instruction sequence
if (is_far_target(d)) {
relocate(rt);
AddressLiteral dest(d);
jumpl_to(dest, O7, O7);
} else {
Assembler::call(d, rt);
}
#else
Assembler::call( d, rt );
#endif
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
MacroAssembler::call( target(L), rt);
}
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
return offset() - x;
}
// Note: All MacroAssembler::set_foo functions are defined out-of-line.
// Loads the current PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
Assembler::add(reg,thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc;
}
inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ldub(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld_ptr(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st(s, temp, addrlit.low10() + offset);
}
inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
patchable_sethi(addrlit, temp);
jmpl(temp, addrlit.low10() + offset, d);
}
inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
int ld_offset, int jmp_offset) {
assert_not_delayed();
//sethi(al); // sethi is caller responsibility for this one
ld_ptr(a, temp, ld_offset);
jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
set_metadata(allocate_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
set_metadata(constant_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
set(obj_addr, d);
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
set_oop(allocate_oop_address(obj), d);
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}
inline void MacroAssembler::load_argument( Argument& a, Register d ) {
if (a.is_register())
mov(a.as_register(), d);
else
ld (a.as_address(), d);
}
inline void MacroAssembler::store_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address()); // ABI says everything is right justified.
}
inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address());
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
fmov(FloatRegisterImpl::S, s, a.as_float_register() );
else
// Floats are stored in the high half of the stack entry
// The low half is undefined per the ABI.
stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}
inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI has D0, D2, D4 are used to pass instead of O0, O1, O2
fmov(FloatRegisterImpl::D, s, a.as_double_register() );
else
stf(FloatRegisterImpl::D, s, a.as_address());
}
inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
stx(s, a.as_address());
}
#endif
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
// returns if membar generates anything, obviously this code should mirror
// membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
}
}
#endif // CPU_SPARC_VM_ASSEMBLER_SPARC_INLINE_HPP

@@ -31,9 +31,4 @@ private:
public:
void flush_bundle(bool start_new_bundle) {}
// Heuristic for pre-packing the pt/pn bit of a predicted branch.
bool is_backward_branch(Label& L) {
return L.is_bound() && insts_end() <= locator_address(L.loc());
}
#endif // CPU_SPARC_VM_CODEBUFFER_SPARC_HPP

@@ -204,25 +204,6 @@
intptr_t* out_register_addr(Register reg) const {
return younger_sp_addr_at(reg->after_save()->sp_offset_in_saved_window());
}
intptr_t* memory_param_addr(int param_ix, bool is_in) const {
int offset = callee_register_argument_save_area_sp_offset + param_ix;
if (is_in)
return fp_addr_at(offset);
else
return sp_addr_at(offset);
}
intptr_t* param_addr(int param_ix, bool is_in) const {
if (param_ix >= callee_register_argument_save_area_words)
return memory_param_addr(param_ix, is_in);
else if (is_in)
return register_addr(Argument(param_ix, true).as_register());
else {
// the registers are stored in the next younger frame
// %%% is this really necessary?
ShouldNotReachHere();
return NULL;
}
}
// Interpreter frames
@@ -269,12 +250,8 @@
#ifndef CC_INTERP
// where Lmonitors is saved:
BasicObjectLock** interpreter_frame_monitors_addr() const {
return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
}
intptr_t** interpreter_frame_esp_addr() const {
return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
}
inline BasicObjectLock** interpreter_frame_monitors_addr() const;
inline intptr_t** interpreter_frame_esp_addr() const;
inline void interpreter_frame_set_tos_address(intptr_t* x);

@@ -25,6 +25,8 @@
#ifndef CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#define CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#include "asm/macroAssembler.hpp"
// Inline functions for SPARC frames:
// Constructors
@@ -185,6 +187,13 @@ inline intptr_t* frame::interpreter_frame_tos_address() const {
return *interpreter_frame_esp_addr() + 1;
}
inline BasicObjectLock** frame::interpreter_frame_monitors_addr() const {
return (BasicObjectLock**) sp_addr_at(Lmonitors->sp_offset_in_saved_window());
}
inline intptr_t** frame::interpreter_frame_esp_addr() const {
return (intptr_t**)sp_addr_at(Lesp->sp_offset_in_saved_window());
}
inline void frame::interpreter_frame_set_tos_address( intptr_t* x ) {
*interpreter_frame_esp_addr() = x - 1;
}

@@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"

@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/icache.hpp"
#define __ _masm->

@@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#define CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assember with interpreter-specific macros

@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,765 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }
inline int AddressLiteral::low10() const {
return Assembler::low10(value());
}
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
jint stub_inst = *(jint*) branch;
print_instruction(stub_inst);
::tty->print("%s", " (unresolved)");
}
#endif // PRODUCT
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
ld( s1, simm13a, d);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
ld_ptr(s1, in_bytes(simm13a), d);
}
#endif
inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ld( s1, s2, d);
#endif
}
inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ld( a, d, offset);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
st( d, s1, simm13a);
#endif
}
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
st_ptr(d, s1, in_bytes(simm13a));
}
#endif
inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
st( d, s1, s2);
#endif
}
inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
stx(d, a, offset);
#else
st( d, a, offset);
#endif
}
// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, s2, d);
#else
Assembler::ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
Assembler::ldx(s1, simm13a, d);
#else
Assembler::ldd(s1, simm13a, d);
#endif
}
inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
ldx(s1, s2, d);
#else
ldd(s1, s2, d);
#endif
}
inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
ldx(a, d, offset);
#else
ldd(a, d, offset);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
Assembler::stx(d, s1, s2);
#else
Assembler::std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
Assembler::stx(d, s1, simm13a);
#else
Assembler::std(d, s1, simm13a);
#endif
}
inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
stx(d, s1, s2);
#else
std(d, s1, s2);
#endif
}
inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
stx(d, a, offset);
#else
std(d, a, offset);
#endif
}
// Functions for isolating 64 bit shifts for LP64
inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, s2, d);
#else
Assembler::sll( s1, s2, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::sllx(s1, imm6a, d);
#else
Assembler::sll( s1, imm6a, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, s2, d);
#else
Assembler::srl( s1, s2, d);
#endif
}
inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
Assembler::srlx(s1, imm6a, d);
#else
Assembler::srl( s1, imm6a, d);
#endif
}
inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
if (s2.is_register()) sll_ptr(s1, s2.as_register(), d);
else sll_ptr(s1, s2.as_constant(), d);
}
// Use the right branch for the platform
inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp(c, a, icc, p, d, rt);
else
Assembler::br(c, a, d, rt);
}
inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
br(c, a, p, target(L));
}
// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
Assembler::bp(c, a, xcc, p, d, rt);
#else
MacroAssembler::br(c, a, p, d, rt);
#endif
}
inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
brx(c, a, p, target(L));
}
inline void MacroAssembler::ba( Label& L ) {
br(always, false, pt, L);
}
// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::bp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::bp(c, a, cc, p, L);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
fbp(c, a, fcc0, p, d, rt);
else
Assembler::fb(c, a, d, rt);
}
inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
fb(c, a, p, target(L));
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
Assembler::fbp(c, a, cc, p, d, rt);
}
inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
Assembler::fbp(c, a, cc, p, L);
}
inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }
inline bool MacroAssembler::is_far_target(address d) {
if (ForceUnreachable) {
// References outside the code cache should be treated as far
return d < CodeCache::low_bound() || d > CodeCache::high_bound();
}
return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
intptr_t disp;
// NULL is ok because it will be relocated later.
// Must change NULL to a reachable address in order to
// pass asserts here and in wdisp.
if ( d == NULL )
d = pc();
// Is this address within range of the call instruction?
// If not, use the expensive instruction sequence
if (is_far_target(d)) {
relocate(rt);
AddressLiteral dest(d);
jumpl_to(dest, O7, O7);
} else {
Assembler::call(d, rt);
}
#else
Assembler::call( d, rt );
#endif
}
inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
MacroAssembler::call( target(L), rt);
}
inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work())
Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
int x = offset();
if (VM_Version::v9_instructions_work())
rdpc(d);
else {
Label lbl;
Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8
if (d == O7) delayed()->nop();
else delayed()->mov(O7, d);
bind(lbl);
}
return offset() - x;
}
// Note: All MacroAssembler::set_foo functions are defined out-of-line.
// Loads the current PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
Unimplemented();
#else
Assembler::sethi( thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
return thepc;
}
inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ldub(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ld_ptr(d, addrlit.low10() + offset, d);
}
inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st(s, temp, addrlit.low10() + offset);
}
inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, temp);
} else {
sethi(addrlit, temp);
}
st_ptr(s, temp, addrlit.low10() + offset);
}
// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
assert_not_delayed();
// Force fixed length sethi because NativeJump and NativeFarCall don't handle
// variable length instruction streams.
patchable_sethi(addrlit, temp);
jmpl(temp, addrlit.low10() + offset, d);
}
inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
jumpl_to(addrlit, temp, G0, offset);
}
inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
int ld_offset, int jmp_offset) {
assert_not_delayed();
//sethi(al); // sethi is caller responsibility for this one
ld_ptr(a, temp, ld_offset);
jmp(temp, jmp_offset);
}
inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
set_metadata(allocate_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
set_metadata(constant_metadata_address(obj), d);
}
inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
set(obj_addr, d);
}
inline void MacroAssembler::set_oop(jobject obj, Register d) {
set_oop(allocate_oop_address(obj), d);
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}
inline void MacroAssembler::load_argument( Argument& a, Register d ) {
if (a.is_register())
mov(a.as_register(), d);
else
ld (a.as_address(), d);
}
inline void MacroAssembler::store_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address()); // ABI says everything is right justified.
}
inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
st_ptr (s, a.as_address());
}
#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
fmov(FloatRegisterImpl::S, s, a.as_float_register() );
else
// Floats are stored in the high half of the stack entry
// The low half is undefined per the ABI.
stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}
inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
if (a.is_float_register())
// V9 ABI has D0, D2, D4 are used to pass instead of O0, O1, O2
fmov(FloatRegisterImpl::D, s, a.as_double_register() );
else
stf(FloatRegisterImpl::D, s, a.as_address());
}
inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
if (a.is_register())
mov(s, a.as_register());
else
stx(s, a.as_address());
}
#endif
inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
relocate(rtype);
add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
relocate(rspec);
add(s1, simm13a, d);
}
// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
if (a.has_index()) add(a.base(), a.index(), d);
else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
if (s2.is_register()) andn(s1, s2.as_register(), d);
else andn(s1, s2.as_constant(), d);
}
inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr( Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }
inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr( Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif
inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
else { ld( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
else { ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
else { ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
else { ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
else { ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
else { lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
else { lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
else { ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
else { ldx( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld( Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
else ldf(w, s1, s2.as_constant(), d);
}
inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
relocate(a.rspec(offset));
ldf(w, a.base(), a.disp() + offset, d);
}
// Returns whether membar() will emit any instruction; this code should
// mirror membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
if( !os::is_MP() ) return false; // Not needed on single CPU
if( VM_Version::v9_instructions_work() ) {
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
return (effective_mask != 0);
} else {
return true;
}
}
inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
// Uniprocessors do not need memory barriers
if (!os::is_MP()) return;
// Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
// 8.4.4.3, a.31 and a.50.
if( VM_Version::v9_instructions_work() ) {
// Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
// of the mmask subfield of const7a that does anything that isn't done
// implicitly is StoreLoad.
const Membar_mask_bits effective_mask =
Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
if ( effective_mask != 0 ) {
Assembler::membar( effective_mask );
}
} else {
// stbar is the closest there is on v8. Equivalent to membar(StoreStore). We
// do not issue the stbar because to my knowledge all v8 machines implement TSO,
// which guarantees that all stores behave as if an stbar were issued just after
// each one of them. On these machines, stbar ought to be a nop. There doesn't
// appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
// it can't be specified by stbar, nor have I come up with a way to simulate it.
//
// Addendum. Dave says that ldstub guarantees a write buffer flush to coherent
// space. Put one here to be on the safe side.
Assembler::ldstub(SP, 0, G0);
}
}
inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
relocate(a.rspec(offset));
assert(!a.has_index(), "");
prefetch(a.base(), a.disp() + offset, f);
}
inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }
#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif
inline void MacroAssembler::st(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index() ); }
else { st( d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index() ); }
else { stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index() ); }
else { sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index() ); }
else { stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index() ); }
else { std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index() ); }
else { stx(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
if (s2.is_register()) stf(w, d, s1, s2.as_register());
else stf(w, d, s1, s2.as_constant());
}
inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index() ); }
else { stf(w, d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) sub(s1, s2.as_register(), d);
else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) sub(d, offset, d);
}
inline void MacroAssembler::swap(Address& a, Register d, int offset) {
relocate(a.rspec(offset));
if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); }
else { swap(a.base(), a.disp() + offset, d); }
}
#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
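As an illustration of the membar() reduction above: under TSO on SPARC v9, every ordering except store-followed-by-load is already guaranteed, so masking off LoadLoad, LoadStore, and StoreStore leaves anything to emit only when StoreLoad was requested. The following is a minimal, self-contained sketch of that mask arithmetic; the bit values follow the V9 mmask encoding implied by the "bit 3, 2, or 0" comment, but they are written out here for illustration and are not taken from HotSpot's headers.

// Standalone sketch, not HotSpot code: bit values follow the SPARC V9 mmask
// encoding (LoadLoad=1, StoreLoad=2, LoadStore=4, StoreStore=8).
#include <cstdio>

enum MembarBits { LoadLoad = 1 << 0, StoreLoad = 1 << 1,
                  LoadStore = 1 << 2, StoreStore = 1 << 3 };

// Under TSO only the StoreLoad ordering can require an instruction, so the
// same masking as membar_has_effect()/membar() above is applied.
static int effective_mask(int const7a) {
  return const7a & ~(LoadLoad | LoadStore | StoreStore);
}

int main() {
  std::printf("full fence  -> mmask %d\n",
              effective_mask(LoadLoad | StoreLoad | LoadStore | StoreStore)); // 2: emit membar #StoreLoad
  std::printf("release-ish -> mmask %d\n",
              effective_mask(LoadStore | StoreStore));                        // 0: nothing to emit
  return 0;
}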

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"

View File

@ -25,7 +25,7 @@
#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
@ -194,11 +194,10 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
static int inv_simm( int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
static intptr_t inv_wdisp( int x, int nbits ) { return Assembler::inv_wdisp( x, 0, nbits); }
static intptr_t inv_wdisp16( int x ) { return Assembler::inv_wdisp16(x, 0); }
static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
static int patch_branch_destination_offset(int dest_offset, int x) {
return Assembler::patched_branch(dest_offset, x, 0);
return MacroAssembler::patched_branch(dest_offset, x, 0);
}
void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }
// utility for checking if x is either of 2 small constants
static bool is_either(int x, int k1, int k2) {
@ -889,7 +888,6 @@ class NativeGeneralJump: public NativeInstruction {
int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
set_long_at(0, patched_instr);
}
void set_annul() { set_annul_bit(); }
NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
void fill_delay_slot(int instr) { set_long_at(4, instr);}
Assembler::Condition condition() {

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/assembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"

View File

@ -24,8 +24,7 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -28,48 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
#include "runtime/handles.inline.hpp"
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
op == 0xE9 /* jmp */ ||
op == 0xEB /* short jmp */ ||
(op & 0xF0) == 0x70 /* short jcc */ ||
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
"Invalid opcode at patch point");
if (op == 0xEB || (op & 0xF0) == 0x70) {
// short offset operators (jmp and jcc)
char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
*disp = imm8;
} else {
int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
int imm32 = target - (address) &disp[1];
*disp = imm32;
}
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
const char* s;
unsigned char op = branch[0];
if (op == 0xE8) {
s = "call";
} else if (op == 0xE9 || op == 0xEB) {
s = "jmp";
} else if ((op & 0xF0) == 0x70) {
s = "jcc";
} else if (op == 0x0F) {
s = "jcc";
} else {
s = "????";
}
tty->print("%s (unresolved)", s);
}
#endif // ndef PRODUCT
#ifndef _LP64
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
@ -87,12 +45,6 @@ inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}
#else
inline void Assembler::emit_long64(jlong x) {
*(jlong*) _code_pos = x;
_code_pos += sizeof(jlong);
code_section()->set_end(_code_pos);
}
#endif // _LP64
#endif // CPU_X86_VM_ASSEMBLER_X86_INLINE_HPP
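For reference on the branch-patching code removed from this header above: for a near jmp or call, the 32-bit displacement stored after the opcode is measured from the end of the displacement field, i.e. from the next instruction. Below is a self-contained sketch of that arithmetic for a 5-byte 0xE9 jmp; the buffer and offsets are invented for illustration and this is not HotSpot code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Patch the rel32 displacement of a 5-byte near jmp (0xE9) so it reaches
// `target`. As in the removed code above, the displacement is measured from
// the byte following the displacement field, i.e. the next instruction.
static void patch_jmp_rel32(uint8_t* branch, uint8_t* target) {
  uint8_t* disp_field = branch + 1;                 // byte after the opcode
  int32_t imm32 = (int32_t)(target - (disp_field + 4));
  std::memcpy(disp_field, &imm32, sizeof(imm32));
}

int main() {
  uint8_t code[64] = {0};
  code[10] = 0xE9;                                  // jmp rel32 at offset 10
  patch_jmp_rel32(&code[10], &code[40]);            // jump forward to offset 40
  int32_t disp;
  std::memcpy(&disp, &code[11], sizeof(disp));
  std::printf("stored displacement = %d\n", disp);  // 40 - 15 = 25
  return 0;
}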

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -25,6 +25,8 @@
#ifndef CPU_X86_VM_FRAME_X86_INLINE_HPP
#define CPU_X86_VM_FRAME_X86_INLINE_HPP
#include "code/codeCache.hpp"
// Inline functions for Intel frames:
// Constructors:

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/icache.hpp"
#define __ _masm->

View File

@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros

View File

@ -25,8 +25,10 @@
#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/frame.hpp"
// This file specializes the assembler with interpreter-specific macros

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"

View File

@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -24,12 +24,11 @@
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"

View File

@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"

View File

@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_32.hpp"
#include "memory/resourceArea.hpp"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86_64.hpp"
#include "memory/resourceArea.hpp"

View File

@ -46,6 +46,12 @@ int AbstractAssembler::code_fill_byte() {
return 0;
}
#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() {
ShouldNotCallThis();
}
#endif
void Assembler::pd_patch_instruction(address branch, address target) {
ShouldNotCallThis();
}
@ -80,6 +86,11 @@ void MacroAssembler::store_oop(jobject obj) {
emit_address((address) obj);
}
void MacroAssembler::store_Metadata(Metadata* md) {
code_section()->relocate(pc(), metadata_Relocation::spec_for_immediate());
emit_address((address) md);
}
static void should_not_call() {
report_should_not_call(__FILE__, __LINE__);
}

View File

@ -55,14 +55,9 @@ class MacroAssembler : public Assembler {
public:
void advance(int bytes);
void store_oop(jobject obj);
void store_Metadata(Metadata* obj);
};
#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() {
ShouldNotCallThis();
}
#endif
address ShouldNotCallThisStub();
address ShouldNotCallThisEntry();

View File

@ -1015,11 +1015,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
// Helper for figuring out if frames are interpreter frames
bool CppInterpreter::contains(address pc) {
#ifdef PRODUCT
ShouldNotCallThis();
#else
return false; // make frame::print_value_on work
#endif // !PRODUCT
}
// Result handlers and convertors

View File

@ -52,11 +52,7 @@ define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
#ifdef _ALLBSD_SOURCE
define_pd_global(bool, UseMembar, true);
#else
define_pd_global(bool, UseMembar, false);
#endif
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread

View File

@ -23,29 +23,10 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");

View File

@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
@ -62,26 +63,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
// put OS-includes here
# include <sys/types.h>

View File

@ -26,13 +26,13 @@
#define OS_BSD_VM_OS_BSD_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_bsd_x86
# include "atomic_bsd_x86.inline.hpp"
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "atomic_bsd_zero.inline.hpp"
# include "orderAccess_bsd_zero.inline.hpp"
#endif

View File

@ -23,29 +23,10 @@
*/
// no precompiled headers
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#include <signal.h>
void OSThread::pd_initialize() {
assert(this != NULL, "check");

View File

@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
@ -62,26 +63,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
// put OS-includes here
# include <sys/types.h>

View File

@ -26,25 +26,22 @@
#define OS_LINUX_VM_OS_LINUX_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "atomic_linux_x86.inline.hpp"
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "atomic_linux_sparc.inline.hpp"
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "atomic_linux_zero.inline.hpp"
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "atomic_linux_arm.inline.hpp"
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "atomic_linux_ppc.inline.hpp"
# include "orderAccess_linux_ppc.inline.hpp"
#endif

View File

@ -30,14 +30,8 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
# include <signal.h>
#include <signal.h>
// ***************************************************************
// Platform dependent initialization and cleanup

View File

@ -29,6 +29,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
@ -63,14 +64,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
// put OS-includes here
# include <dlfcn.h>

View File

@ -26,13 +26,13 @@
#define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_solaris_x86
# include "atomic_solaris_x86.inline.hpp"
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "atomic_solaris_sparc.inline.hpp"
# include "orderAccess_solaris_sparc.inline.hpp"
#endif

View File

@ -30,9 +30,6 @@
#include "runtime/osThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
void OSThread::pd_initialize() {
set_thread_handle(NULL);

View File

@ -32,6 +32,7 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
@ -65,10 +66,6 @@
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
#ifdef _DEBUG
#include <crtdbg.h>

View File

@ -26,9 +26,10 @@
#define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_OS_ARCH_windows_x86
# include "atomic_windows_x86.inline.hpp"
# include "orderAccess_windows_x86.inline.hpp"
#endif

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@ -33,7 +33,6 @@
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@ -33,7 +33,6 @@
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"

View File

@ -23,8 +23,7 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_sparc.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -27,6 +27,7 @@
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"
// Implementation of class OrderAccess.

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@ -33,7 +33,6 @@
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"

View File

@ -23,8 +23,8 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/threadLocalStorage.hpp"

View File

@ -23,7 +23,7 @@
*/
// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"

View File

@ -13,6 +13,6 @@ Adding the -i option will also report inlining like PrintInlining.
More information about the LogCompilation output can be found at
http://wikis.sun.com/display/HotSpotInternals/LogCompilation+overview
http://wikis.sun.com/display/HotSpotInternals/PrintCompilation
http://wikis.sun.com/display/HotSpotInternals/LogCompilation+tool
https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+tool

View File

@ -38,6 +38,7 @@ public class CallSite {
private String reason;
private List<CallSite> calls;
private int endNodes;
private int endLiveNodes;
private double timeStamp;
CallSite() {
@ -106,7 +107,7 @@ public class CallSite {
}
}
if (getEndNodes() > 0) {
stream.printf(" (end time: %6.4f nodes: %d)", getTimeStamp(), getEndNodes());
stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
}
stream.println("");
if (getReceiver() != null) {
@ -195,6 +196,14 @@ public class CallSite {
return endNodes;
}
void setEndLiveNodes(int n) {
endLiveNodes = n;
}
public int getEndLiveNodes() {
return endLiveNodes;
}
void setTimeStamp(double time) {
timeStamp = time;
}

View File

@ -37,13 +37,13 @@ import org.xml.sax.helpers.*;
public class LogCompilation extends DefaultHandler implements ErrorHandler, Constants {
public static void usage(int exitcode) {
System.out.println("Usage: LogCompilation [ -v ] [ -c ] [ -s ] [ -e | -N ] file1 ...");
System.out.println("Usage: LogCompilation [ -v ] [ -c ] [ -s ] [ -e | -n ] file1 ...");
System.out.println(" -c: clean up malformed 1.5 xml");
System.out.println(" -i: print inlining decisions");
System.out.println(" -S: print compilation statistics");
System.out.println(" -s: sort events by start time");
System.out.println(" -e: sort events by elapsed time");
System.out.println(" -N: sort events by name and start");
System.out.println(" -n: sort events by name and start");
System.exit(exitcode);
}
@ -137,7 +137,11 @@ public class LogCompilation extends DefaultHandler implements ErrorHandler, Cons
v2 = Integer.valueOf(0);
}
phaseNodes.put(phase.getName(), Integer.valueOf(v2.intValue() + phase.getNodes()));
out.printf("\t%s %6.4f %d %d\n", phase.getName(), phase.getElapsedTime(), phase.getStartNodes(), phase.getNodes());
/* Print phase name, elapsed time, nodes at the start of the phase,
nodes created in the phase, live nodes at the start of the phase,
live nodes added in the phase.
*/
out.printf("\t%s %6.4f %d %d %d %d\n", phase.getName(), phase.getElapsedTime(), phase.getStartNodes(), phase.getNodes(), phase.getStartLiveNodes(), phase.getLiveNodes());
}
} else if (e instanceof MakeNotEntrantEvent) {
MakeNotEntrantEvent mne = (MakeNotEntrantEvent) e;

View File

@ -224,7 +224,6 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
throw new InternalError("can't find " + name);
}
int indent = 0;
String compile_id;
String type(String id) {
String result = types.get(id);
@ -268,12 +267,18 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
if (qname.equals("phase")) {
Phase p = new Phase(search(atts, "name"),
Double.parseDouble(search(atts, "stamp")),
Integer.parseInt(search(atts, "nodes")));
Integer.parseInt(search(atts, "nodes", "0")),
Integer.parseInt(search(atts, "live")));
phaseStack.push(p);
} else if (qname.equals("phase_done")) {
Phase p = phaseStack.pop();
p.setEndNodes(Integer.parseInt(search(atts, "nodes")));
if (! p.getId().equals(search(atts, "name"))) {
System.out.println("phase: " + p.getId());
throw new InternalError("phase name mismatch");
}
p.setEnd(Double.parseDouble(search(atts, "stamp")));
p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
compile.getPhases().add(p);
} else if (qname.equals("task")) {
compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
@ -317,13 +322,16 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
m.setName(search(atts, "name"));
m.setReturnType(type(search(atts, "return")));
m.setArguments(search(atts, "arguments", "void"));
if (search(atts, "unloaded", "0").equals("0")) {
m.setBytes(search(atts, "bytes"));
m.setIICount(search(atts, "iicount"));
m.setFlags(search(atts, "flags"));
}
methods.put(id, m);
} else if (qname.equals("call")) {
site = new CallSite(bci, method(search(atts, "method")));
site.setCount(Integer.parseInt(search(atts, "count")));
site.setCount(Integer.parseInt(search(atts, "count", "0")));
String receiver = atts.getValue("receiver");
if (receiver != null) {
site.setReceiver(type(receiver));
@ -406,6 +414,7 @@ public class LogParser extends DefaultHandler implements ErrorHandler, Constants
} else if (qname.equals("parse_done")) {
CallSite call = scopes.pop();
call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
scopes.push(call);
}

View File

@ -30,10 +30,13 @@ public class Phase extends BasicLogEvent {
private final int startNodes;
private int endNodes;
private final int startLiveNodes;
private int endLiveNodes;
Phase(String n, double s, int nodes) {
Phase(String n, double s, int nodes, int live) {
super(s, n);
startNodes = nodes;
startLiveNodes = live;
}
int getNodes() {
@ -55,6 +58,22 @@ public class Phase extends BasicLogEvent {
public int getEndNodes() {
return endNodes;
}
/* Number of live nodes added by the phase */
int getLiveNodes() {
return getEndLiveNodes() - getStartLiveNodes();
}
void setEndLiveNodes(int n) {
endLiveNodes = n;
}
public int getStartLiveNodes() {
return startLiveNodes;
}
public int getEndLiveNodes() {
return endLiveNodes;
}
@Override
public void print(PrintStream stream) {

View File

@ -212,7 +212,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._VM_file._name));
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
AD.addInclude(AD._CPP_file, "asm/assembler.hpp");
AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp");
AD.addInclude(AD._CPP_file, "code/vmreg.hpp");
AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp");
AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
@ -231,17 +231,14 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "runtime/stubRoutines.hpp");
AD.addInclude(AD._CPP_file, "utilities/growableArray.hpp");
#ifdef TARGET_ARCH_x86
AD.addInclude(AD._CPP_file, "assembler_x86.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_x86.hpp");
AD.addInclude(AD._CPP_file, "vmreg_x86.inline.hpp");
#endif
#ifdef TARGET_ARCH_sparc
AD.addInclude(AD._CPP_file, "assembler_sparc.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp");
AD.addInclude(AD._CPP_file, "vmreg_sparc.inline.hpp");
#endif
#ifdef TARGET_ARCH_arm
AD.addInclude(AD._CPP_file, "assembler_arm.inline.hpp");
AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp");
AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
#endif

View File

@ -23,26 +23,13 @@
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
// Implementation of AbstractAssembler
@ -56,16 +43,13 @@ AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
if (code == NULL) return;
CodeSection* cs = code->insts();
cs->clear_mark(); // new assembler kills old mark
_code_section = cs;
_code_begin = cs->start();
_code_limit = cs->limit();
_code_pos = cs->end();
_oop_recorder= code->oop_recorder();
DEBUG_ONLY( _short_branch_delta = 0; )
if (_code_begin == NULL) {
if (cs->start() == NULL) {
vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
code->name()));
}
_code_section = cs;
_oop_recorder= code->oop_recorder();
DEBUG_ONLY( _short_branch_delta = 0; )
}
void AbstractAssembler::set_code_section(CodeSection* cs) {
@ -73,9 +57,6 @@ void AbstractAssembler::set_code_section(CodeSection* cs) {
assert(cs->is_allocated(), "need to pre-allocate this section");
cs->clear_mark(); // new assembly into this section kills old mark
_code_section = cs;
_code_begin = cs->start();
_code_limit = cs->limit();
_code_pos = cs->end();
}
// Inform CodeBuffer that incoming code and relocation will be for stubs
@ -83,7 +64,6 @@ address AbstractAssembler::start_a_stub(int required_space) {
CodeBuffer* cb = code();
CodeSection* cs = cb->stubs();
assert(_code_section == cb->insts(), "not in insts?");
sync();
if (cs->maybe_expand_to_ensure_remaining(required_space)
&& cb->blob() == NULL) {
return NULL;
@ -96,7 +76,6 @@ address AbstractAssembler::start_a_stub(int required_space) {
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
assert(_code_section == code()->stubs(), "not in stubs?");
sync();
set_code_section(code()->insts());
}
@ -104,8 +83,7 @@ void AbstractAssembler::end_a_stub() {
address AbstractAssembler::start_a_const(int required_space, int required_align) {
CodeBuffer* cb = code();
CodeSection* cs = cb->consts();
assert(_code_section == cb->insts(), "not in insts?");
sync();
assert(_code_section == cb->insts() || _code_section == cb->stubs(), "not in insts/stubs?");
address end = cs->end();
int pad = -(intptr_t)end & (required_align-1);
if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
@ -121,16 +99,13 @@ address AbstractAssembler::start_a_const(int required_space, int required_align)
}
// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
// in section cs (insts or stubs).
void AbstractAssembler::end_a_const(CodeSection* cs) {
assert(_code_section == code()->consts(), "not in consts?");
sync();
set_code_section(code()->insts());
set_code_section(cs);
}
void AbstractAssembler::flush() {
sync();
ICache::invalidate_range(addr_at(0), offset());
}

View File

@ -25,12 +25,14 @@
#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
#define SHARE_VM_ASM_ASSEMBLER_HPP
#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
# include "vm_version_x86.hpp"
@ -54,7 +56,6 @@
// This file contains platform-independent assembler declarations.
class CodeBuffer;
class MacroAssembler;
class AbstractAssembler;
class Label;
@ -122,7 +123,7 @@ class Label VALUE_OBJ_CLASS_SPEC {
assert(_loc == -1, "already bound");
_loc = loc;
}
void bind_loc(int pos, int sect); // = bind_loc(locator(pos, sect))
void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }
#ifndef PRODUCT
// Iterates over all unresolved instructions for printing
@ -137,8 +138,8 @@ class Label VALUE_OBJ_CLASS_SPEC {
assert(_loc >= 0, "unbound label");
return _loc;
}
int loc_pos() const; // == locator_pos(loc())
int loc_sect() const; // == locator_sect(loc())
int loc_pos() const { return CodeBuffer::locator_pos(loc()); }
int loc_sect() const { return CodeBuffer::locator_sect(loc()); }
bool is_bound() const { return _loc >= 0; }
bool is_unbound() const { return _loc == -1 && _patch_index > 0; }
@ -201,26 +202,32 @@ class AbstractAssembler : public ResourceObj {
protected:
CodeSection* _code_section; // section within the code buffer
address _code_begin; // first byte of code buffer
address _code_limit; // first byte after code buffer
address _code_pos; // current code generation position
OopRecorder* _oop_recorder; // support for relocInfo::oop_type
// Code emission & accessing
address addr_at(int pos) const { return _code_begin + pos; }
address addr_at(int pos) const { return code_section()->start() + pos; }
// This routine is called with a label is used for an address.
// Labels and displacements truck in offsets, but target must return a PC.
address target(Label& L); // return _code_section->target(L)
address target(Label& L) { return code_section()->target(L, pc()); }
bool is8bit(int x) const { return -0x80 <= x && x < 0x80; }
bool isByte(int x) const { return 0 <= x && x < 0x100; }
bool isShiftCount(int x) const { return 0 <= x && x < 32; }
void emit_byte(int x); // emit a single byte
void emit_word(int x); // emit a 16-bit word (not a wordSize word!)
void emit_long(jint x); // emit a 32-bit word (not a longSize word!)
void emit_address(address x); // emit an address (not a longSize word!)
void emit_int8( int8_t x) { code_section()->emit_int8( x); }
void emit_int16( int16_t x) { code_section()->emit_int16( x); }
void emit_int32( int32_t x) { code_section()->emit_int32( x); }
void emit_int64( int64_t x) { code_section()->emit_int64( x); }
void emit_float( jfloat x) { code_section()->emit_float( x); }
void emit_double( jdouble x) { code_section()->emit_double( x); }
void emit_address(address x) { code_section()->emit_address(x); }
void emit_byte(int x) { emit_int8 (x); } // deprecated
void emit_word(int x) { emit_int16(x); } // deprecated
void emit_long(jint x) { emit_int32(x); } // deprecated
// Instruction boundaries (required when emitting relocatable values).
class InstructionMark: public StackObj {
@ -237,10 +244,10 @@ class AbstractAssembler : public ResourceObj {
}
};
friend class InstructionMark;
#ifdef ASSERT
#ifdef ASSERT
// Make it return true on platforms which need to verify
// instruction boundaries for some operations.
inline static bool pd_check_instruction_mark();
static bool pd_check_instruction_mark();
// Add delta to short branch distance to verify that it still fit into imm8.
int _short_branch_delta;
@ -262,13 +269,13 @@ class AbstractAssembler : public ResourceObj {
_assm->clear_short_branch_delta();
}
};
#else
#else
// Dummy in product.
class ShortBranchVerifier: public StackObj {
public:
ShortBranchVerifier(AbstractAssembler* assm) {}
};
#endif
#endif
// Label functions
void print(Label& L);
@ -278,9 +285,6 @@ class AbstractAssembler : public ResourceObj {
// Creation
AbstractAssembler(CodeBuffer* code);
// save end pointer back to code buf.
void sync();
// ensure buf contains all code (call this before using/copying the code)
void flush();
@ -308,26 +312,31 @@ class AbstractAssembler : public ResourceObj {
static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
// Accessors
CodeBuffer* code() const; // _code_section->outer()
CodeSection* code_section() const { return _code_section; }
int sect() const; // return _code_section->index()
address pc() const { return _code_pos; }
int offset() const { return _code_pos - _code_begin; }
int locator() const; // CodeBuffer::locator(offset(), sect())
CodeBuffer* code() const { return code_section()->outer(); }
int sect() const { return code_section()->index(); }
address pc() const { return code_section()->end(); }
int offset() const { return code_section()->size(); }
int locator() const { return CodeBuffer::locator(offset(), sect()); }
OopRecorder* oop_recorder() const { return _oop_recorder; }
void set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }
address inst_mark() const;
void set_inst_mark();
void clear_inst_mark();
address inst_mark() const { return code_section()->mark(); }
void set_inst_mark() { code_section()->set_mark(); }
void clear_inst_mark() { code_section()->clear_mark(); }
// Constants in code
void a_byte(int x);
void a_long(jint x);
void relocate(RelocationHolder const& rspec, int format = 0);
void relocate(RelocationHolder const& rspec, int format = 0) {
assert(!pd_check_instruction_mark()
|| inst_mark() == NULL || inst_mark() == code_section()->end(),
"call relocate() between instructions");
code_section()->relocate(code_section()->end(), rspec, format);
}
void relocate( relocInfo::relocType rtype, int format = 0) {
if (rtype != relocInfo::none)
relocate(Relocation::spec_simple(rtype), format);
code_section()->relocate(code_section()->end(), rtype, format);
}
static int code_fill_byte(); // used to pad out odd-sized code buffers
@ -348,52 +357,55 @@ class AbstractAssembler : public ResourceObj {
void end_a_stub();
// Ditto for constants.
address start_a_const(int required_space, int required_align = sizeof(double));
void end_a_const();
void end_a_const(CodeSection* cs); // Pass the codesection to continue in (insts or stubs?).
// constants support
//
// We must remember the code section (insts or stubs) in c1
// so we can reset to the proper section in end_a_const().
address long_constant(jlong c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
*(jlong*)ptr = c;
_code_pos = ptr + sizeof(c);
end_a_const();
emit_int64(c);
end_a_const(c1);
}
return ptr;
}
address double_constant(jdouble c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
*(jdouble*)ptr = c;
_code_pos = ptr + sizeof(c);
end_a_const();
emit_double(c);
end_a_const(c1);
}
return ptr;
}
address float_constant(jfloat c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
*(jfloat*)ptr = c;
_code_pos = ptr + sizeof(c);
end_a_const();
emit_float(c);
end_a_const(c1);
}
return ptr;
}
address address_constant(address c) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
*(address*)ptr = c;
_code_pos = ptr + sizeof(c);
end_a_const();
emit_address(c);
end_a_const(c1);
}
return ptr;
}
address address_constant(address c, RelocationHolder const& rspec) {
CodeSection* c1 = _code_section;
address ptr = start_a_const(sizeof(c), sizeof(c));
if (ptr != NULL) {
relocate(rspec);
*(address*)ptr = c;
_code_pos = ptr + sizeof(c);
end_a_const();
emit_address(c);
end_a_const(c1);
}
return ptr;
}
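To illustrate the save/emit/restore idiom the constant emitters above now follow (remember the current section, switch into the constants section, then hand the remembered section back to end_a_const() so emission resumes in insts or stubs), here is a small self-contained sketch. Emitter, Section, and their members are invented stand-ins for illustration, not CodeBuffer's real API.

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in for the insts/stubs/consts sections of a code buffer.
struct Section { const char* name; std::vector<uint8_t> bytes; };

struct Emitter {
  Section insts{"insts"}, stubs{"stubs"}, consts{"consts"};
  Section* current = &insts;

  void emit_int64(int64_t x) {
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&x);
    current->bytes.insert(current->bytes.end(), p, p + sizeof(x));
  }

  // Mirrors long_constant(): remember where we were, emit into consts,
  // then restore the saved section (insts or stubs) afterwards.
  size_t long_constant(int64_t c) {
    Section* saved = current;        // like "CodeSection* c1 = _code_section;"
    current = &consts;               // like start_a_const(...)
    size_t offset = consts.bytes.size();
    emit_int64(c);
    current = saved;                 // like end_a_const(c1)
    return offset;
  }
};

int main() {
  Emitter a;
  a.current = &a.stubs;              // pretend we are generating a stub
  a.long_constant(0x1234);
  std::printf("back in %s, consts holds %zu bytes\n",
              a.current->name, a.consts.bytes.size());
  return 0;
}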

View File

@ -26,92 +26,21 @@
#define SHARE_VM_ASM_ASSEMBLER_INLINE_HPP
#include "asm/assembler.hpp"
#include "asm/codeBuffer.hpp"
#include "compiler/disassembler.hpp"
#include "runtime/threadLocalStorage.hpp"
inline void AbstractAssembler::sync() {
CodeSection* cs = code_section();
guarantee(cs->start() == _code_begin, "must not shift code buffer");
cs->set_end(_code_pos);
}
inline void AbstractAssembler::emit_byte(int x) {
assert(isByte(x), "not a byte");
*(unsigned char*)_code_pos = (unsigned char)x;
_code_pos += sizeof(unsigned char);
sync();
}
inline void AbstractAssembler::emit_word(int x) {
*(short*)_code_pos = (short)x;
_code_pos += sizeof(short);
sync();
}
inline void AbstractAssembler::emit_long(jint x) {
*(jint*)_code_pos = x;
_code_pos += sizeof(jint);
sync();
}
inline void AbstractAssembler::emit_address(address x) {
*(address*)_code_pos = x;
_code_pos += sizeof(address);
sync();
}
inline address AbstractAssembler::inst_mark() const {
return code_section()->mark();
}
inline void AbstractAssembler::set_inst_mark() {
code_section()->set_mark();
}
inline void AbstractAssembler::clear_inst_mark() {
code_section()->clear_mark();
}
inline void AbstractAssembler::relocate(RelocationHolder const& rspec, int format) {
assert(!pd_check_instruction_mark()
|| inst_mark() == NULL || inst_mark() == _code_pos,
"call relocate() between instructions");
code_section()->relocate(_code_pos, rspec, format);
}
inline CodeBuffer* AbstractAssembler::code() const {
return code_section()->outer();
}
inline int AbstractAssembler::sect() const {
return code_section()->index();
}
inline int AbstractAssembler::locator() const {
return CodeBuffer::locator(offset(), sect());
}
inline address AbstractAssembler::target(Label& L) {
return code_section()->target(L, pc());
}
inline int Label::loc_pos() const {
return CodeBuffer::locator_pos(loc());
}
inline int Label::loc_sect() const {
return CodeBuffer::locator_sect(loc());
}
inline void Label::bind_loc(int pos, int sect) {
bind_loc(CodeBuffer::locator(pos, sect));
}
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#endif // SHARE_VM_ASM_ASSEMBLER_INLINE_HPP

View File

@ -254,6 +254,10 @@ address CodeBuffer::locator_address(int locator) const {
return start + locator_pos(locator);
}
bool CodeBuffer::is_backward_branch(Label& L) {
return L.is_bound() && insts_end() <= locator_address(L.loc());
}
address CodeBuffer::decode_begin() {
address begin = _insts.start();
if (_decode_begin != NULL && _decode_begin > begin)
@ -758,7 +762,18 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
// Make the new code copy use the old copy's relocations:
dest_cs->initialize_locs_from(cs);
}
// Do relocation after all sections are copied.
// This is necessary if the code uses constants in stubs, which are
// relocated when the corresponding instruction in the code (e.g., a
// call) is relocated. Stubs are placed behind the main code
// section, so that section has to be copied before relocating.
for (int n = (int) SECT_FIRST; n < (int)SECT_LIMIT; n++) {
// pull code out of each section
const CodeSection* cs = code_section(n);
if (cs->is_empty()) continue; // skip trivial section
CodeSection* dest_cs = dest->code_section(n);
{ // Repair the pc relative information in the code after the move
RelocIterator iter(dest_cs);
while (iter.next()) {

Some files were not shown because too many files have changed in this diff.