This commit is contained in:
Alejandro Murillo 2014-02-21 08:09:15 -08:00
commit fe929c4107
414 changed files with 59506 additions and 4144 deletions

View File

@@ -80,7 +80,7 @@ combination of ptrace and /proc calls.
*************************************************************************************/
#if defined(sparc) || defined(sparcv9)
#if defined(sparc) || defined(sparcv9) || defined(ppc64)
#define user_regs_struct pt_regs
#endif

View File

@@ -29,54 +29,51 @@
#include <thread_db.h>
#include "libproc_impl.h"
static const char* alt_root = NULL;
static int alt_root_len = -1;
#define SA_ALTROOT "SA_ALTROOT"
static void init_alt_root() {
if (alt_root_len == -1) {
alt_root = getenv(SA_ALTROOT);
if (alt_root) {
alt_root_len = strlen(alt_root);
} else {
alt_root_len = 0;
}
}
}
int pathmap_open(const char* name) {
int fd;
char alt_path[PATH_MAX + 1];
static const char *alt_root = NULL;
static int alt_root_initialized = 0;
init_alt_root();
int fd;
char alt_path[PATH_MAX + 1], *alt_path_end;
const char *s;
if (alt_root_len > 0) {
strcpy(alt_path, alt_root);
strcat(alt_path, name);
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
if (!alt_root_initialized) {
alt_root_initialized = -1;
alt_root = getenv(SA_ALTROOT);
}
if (strrchr(name, '/')) {
strcpy(alt_path, alt_root);
strcat(alt_path, strrchr(name, '/'));
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
}
} else {
fd = open(name, O_RDONLY);
if (fd >= 0) {
return fd;
}
}
if (alt_root == NULL) {
return open(name, O_RDONLY);
}
return -1;
strcpy(alt_path, alt_root);
alt_path_end = alt_path + strlen(alt_path);
// Strip path items one by one and try to open file with alt_root prepended
s = name;
while (1) {
strcat(alt_path, s);
s += 1;
fd = open(alt_path, O_RDONLY);
if (fd >= 0) {
print_debug("path %s substituted for %s\n", alt_path, name);
return fd;
}
// The linker always puts the full path of a solib into the process image, so we
// can rely on the presence of '/'. If no slash is present, the solib does not
// physically exist (e.g. linux-gate.so) and opening it would fail anyway
if ((s = strchr(s, '/')) == NULL) {
break;
}
*alt_path_end = 0;
}
return -1;
}
static bool _libsaproc_debug;
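The lookup order of the rewritten pathmap_open can be sketched in shell (a minimal sketch, not part of the commit; the SA_ALTROOT value and solib path are hypothetical):

export SA_ALTROOT=/altroot                  # hypothetical alternate root
name=/usr/lib/libfoo.so                     # hypothetical solib path
s=$name
while [ -n "$s" ]; do
  candidate="$SA_ALTROOT$s"
  # the C code open()s each candidate; here we only test readability
  if [ -r "$candidate" ]; then echo "would open $candidate"; break; fi
  # strip one leading path component, as the C loop does via strchr(s + 1, '/')
  s=`echo "$s" | sed 's|^/[^/]*||'`
done
# tries /altroot/usr/lib/libfoo.so, then /altroot/lib/libfoo.so, then
# /altroot/libfoo.so; with SA_ALTROOT unset, pathmap_open open()s name directly.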

View File

@@ -55,31 +55,21 @@ class LinuxCDebugger implements CDebugger {
if (pc == null) {
return null;
}
List objs = getLoadObjectList();
Object[] arr = objs.toArray();
// load objects are sorted by base address, do binary search
int mid = -1;
int low = 0;
int high = arr.length - 1;
while (low <= high) {
mid = (low + high) >> 1;
LoadObject midVal = (LoadObject) arr[mid];
long cmp = pc.minus(midVal.getBase());
if (cmp < 0) {
high = mid - 1;
} else if (cmp > 0) {
long size = midVal.getSize();
if (cmp >= size) {
low = mid + 1;
} else {
return (LoadObject) arr[mid];
}
} else { // match found
return (LoadObject) arr[mid];
}
/* Typically there are only about ten load objects here, so there is no reason
to sort and binary-search; linear search gives acceptable performance. */
List objs = getLoadObjectList();
for (int i = 0; i < objs.size(); i++) {
LoadObject ob = (LoadObject) objs.get(i);
Address base = ob.getBase();
long size = ob.getSize();
if ( pc.greaterThanOrEqual(base) && pc.lessThan(base.addOffsetTo(size))) {
return ob;
}
}
// no match found.
return null;
}
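The replacement is the standard half-open interval test, base <= pc < base + size. With hypothetical numbers (a shell sketch, just to make the arithmetic concrete):

# a load object at base 0x1000 with size 0x2000 contains pc 0x1800,
# because 0x1000 <= 0x1800 < 0x3000:
pc=$((0x1800)); base=$((0x1000)); size=$((0x2000))
[ "$pc" -ge "$base" ] && [ "$pc" -lt "$((base + size))" ] && echo "pc is inside"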

View File

@@ -0,0 +1,77 @@
/*
* @(#)AdaptiveFreeList.java
*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
public class AdaptiveFreeList extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
sizeField = type.getCIntegerField("_size");
countField = type.getCIntegerField("_count");
headerSize = type.getSize();
}
// Fields
private static CIntegerField sizeField;
private static CIntegerField countField;
private static long headerSize;
//Constructor
public AdaptiveFreeList(Address address) {
super(address);
}
// Accessors
public long size() {
return sizeField.getValue(addr);
}
public long count() {
return countField.getValue(addr);
}
public static long sizeOf() {
return headerSize;
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,25 +24,29 @@
package sun.jvm.hotspot.memory;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.debugger.Debugger;
import sun.jvm.hotspot.oops.ObjectHeap;
import sun.jvm.hotspot.oops.Oop;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
import sun.jvm.hotspot.utilities.Assert;
public class CompactibleFreeListSpace extends CompactibleSpace {
private static AddressField collectorField;
// for free size, three fields
// FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
// FreeList _indexedFreeList[IndexSetSize]; // indexed array for small size blocks
// LinearAllocBlock _smallLinearAllocBlock; // small linear alloc in TLAB
private static AddressField indexedFreeListField;
private static AddressField dictionaryField;
private static long smallLinearAllocBlockFieldOffset;
private static long indexedFreeListSizeOf;
private int heapWordSize; // 4 for 32bit, 8 for 64 bits
private int IndexSetStart; // for small indexed list
@@ -109,11 +113,11 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
// small chunks
long size = 0;
Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
cur = cur.addOffsetTo(IndexSetStart*FreeList.sizeOf());
cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
FreeList freeList = (FreeList) VMObjectFactory.newObject(FreeList.class, cur);
AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
size += i*freeList.count();
cur= cur.addOffsetTo(IndexSetStride*FreeList.sizeOf());
cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
}
// large block
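The indexed list at slot i holds free chunks of i heap words each, so the loop accumulates i * count(i). With hypothetical counts (a sketch):

# lists with (size, count) = (3, 10), (4, 2), (5, 1) contribute
# 3*10 + 4*2 + 5*1 heap words of indexed free space:
echo $(( 3*10 + 4*2 + 5*1 ))    # prints 43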

View File

@@ -1,72 +0,0 @@
/*
* @(#)FreeList.java
*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.runtime.*;
public class FreeList extends VMObject {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("FreeList<FreeChunk>");
sizeField = type.getCIntegerField("_size");
countField = type.getCIntegerField("_count");
headerSize = type.getSize();
}
// Fields
private static CIntegerField sizeField;
private static CIntegerField countField;
private static long headerSize;
//Constructor
public FreeList(Address address) {
super(address);
}
// Accessors
public long size() {
return sizeField.getValue(addr);
}
public long count() {
return countField.getValue(addr);
}
public static long sizeOf() {
return headerSize;
}
}

View File

@@ -371,19 +371,23 @@ function sym2addr(dso, sym) {
return sa.dbg.lookup(dso, sym);
}
// returns the ClosestSymbol or null
function closestSymbolFor(addr) {
if (sa.cdbg == null) {
function loadObjectContainingPC(addr) {
if (sa.cdbg == null) {
// no CDebugger support, return null
return null;
} else {
var dso = sa.cdbg.loadObjectContainingPC(addr);
if (dso != null) {
return dso.closestSymbolToPC(addr);
} else {
return null;
}
}
}
return sa.cdbg.loadObjectContainingPC(addr);
}
// returns the ClosestSymbol or null
function closestSymbolFor(addr) {
var dso = loadObjectContainingPC(addr);
if (dso != null) {
return dso.closestSymbolToPC(addr);
}
return null;
}
// Address-to-symbol
@@ -804,6 +808,16 @@ delete tmp;
// VM type to SA class map
var vmType2Class = new Object();
// C2 only classes
try{
vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
} catch(e) {
// Ignore the exception. C2-specific objects may not be
// available in a client VM
}
// This is *not* exhaustive. Add more if needed.
// code blobs
vmType2Class["BufferBlob"] = sapkg.code.BufferBlob;
@@ -812,10 +826,8 @@ vmType2Class["RuntimeStub"] = sapkg.code.RuntimeStub;
vmType2Class["SafepointBlob"] = sapkg.code.SafepointBlob;
vmType2Class["C2IAdapter"] = sapkg.code.C2IAdapter;
vmType2Class["DeoptimizationBlob"] = sapkg.code.DeoptimizationBlob;
vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
vmType2Class["I2CAdapter"] = sapkg.code.I2CAdapter;
vmType2Class["OSRAdapter"] = sapkg.code.OSRAdapter;
vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
vmType2Class["PCDesc"] = sapkg.code.PCDesc;
// interpreter
@@ -876,21 +888,29 @@ function isOop(addr) {
// returns description of given pointer as a String
function whatis(addr) {
addr = any2addr(addr);
var ptrLoc = findPtr(addr);
if (ptrLoc.isUnknown()) {
var vmType = vmTypeof(addr);
if (vmType != null) {
return "pointer to " + vmType.name;
} else {
var sym = closestSymbolFor(addr);
if (sym != null) {
return sym.name + '+' + sym.offset;
} else {
return ptrLoc.toString();
}
}
} else {
return ptrLoc.toString();
}
addr = any2addr(addr);
var ptrLoc = findPtr(addr);
if (!ptrLoc.isUnknown()) {
return ptrLoc.toString();
}
var vmType = vmTypeof(addr);
if (vmType != null) {
return "pointer to " + vmType.name;
}
var dso = loadObjectContainingPC(addr);
if (dso == null) {
return ptrLoc.toString();
}
var sym = dso.closestSymbolToPC(addr);
if (sym != null) {
return sym.name + '+' + sym.offset;
}
var s = dso.getName();
var p = s.lastIndexOf("/");
var base = dso.getBase();
return s.substring(p+1, s.length) + '+' + addr.minus(base);
}

View File

@@ -87,6 +87,7 @@ endif
# Typical C1/C2 targets made available with this Makefile
C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1
C2_VM_TARGETS=product fastdebug optimized debug
CORE_VM_TARGETS=productcore fastdebugcore optimizedcore debugcore
ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero
SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark
MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1
@@ -136,6 +137,12 @@ all_fastdebugshark: fastdebugshark docs export_fastdebug
all_debugshark: debugshark docs export_debug
all_optimizedshark: optimizedshark docs export_optimized
allcore: all_productcore all_fastdebugcore
all_productcore: productcore docs export_product
all_fastdebugcore: fastdebugcore docs export_fastdebug
all_debugcore: debugcore docs export_debug
all_optimizedcore: optimizedcore docs export_optimized
# Do everything
world: all create_jdk
@@ -154,6 +161,7 @@ endif
# Output directories
C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
CORE_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_core
MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
ZERO_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_zero
SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark
@@ -167,6 +175,10 @@ $(C2_VM_TARGETS):
$(CD) $(GAMMADIR)/make; \
$(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
$(CORE_VM_TARGETS):
$(CD) $(GAMMADIR)/make; \
$(MAKE) BUILD_DIR=$(CORE_DIR) BUILD_FLAVOR=$(@:%core=%) VM_TARGET=$@ generic_buildcore $(ALT_OUT)
$(ZERO_VM_TARGETS):
$(CD) $(GAMMADIR)/make; \
$(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
@@ -228,6 +240,20 @@ else
$(MAKE_ARGS) $(VM_TARGET)
endif
generic_buildcore: $(HOTSPOT_SCRIPT)
ifeq ($(HS_ARCH),ppc)
ifeq ($(ARCH_DATA_MODEL),64)
$(MKDIR) -p $(OUTPUTDIR)
$(CD) $(OUTPUTDIR); \
$(MAKE) -f $(ABS_OS_MAKEFILE) \
$(MAKE_ARGS) $(VM_TARGET)
else
@$(ECHO) "No ($(VM_TARGET)) for ppc ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
endif
else
@$(ECHO) "No ($(VM_TARGET)) for $(HS_ARCH)"
endif
generic_buildzero: $(HOTSPOT_SCRIPT)
$(MKDIR) -p $(OUTPUTDIR)
$(CD) $(OUTPUTDIR); \
@@ -287,6 +313,7 @@ XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
C1_BUILD_DIR =$(C1_DIR)/$(BUILD_FLAVOR)
C2_BUILD_DIR =$(C2_DIR)/$(BUILD_FLAVOR)
CORE_BUILD_DIR =$(CORE_DIR)/$(BUILD_FLAVOR)
MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR)
ZERO_BUILD_DIR =$(ZERO_DIR)/$(BUILD_FLAVOR)
SHARK_BUILD_DIR =$(SHARK_DIR)/$(BUILD_FLAVOR)
@@ -464,6 +491,28 @@ $(EXPORT_SERVER_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
$(install-dir)
endif
# Core
ifeq ($(JVM_VARIANT_CORE), true)
# Common
$(EXPORT_LIB_DIR)/%.jar: $(CORE_BUILD_DIR)/../generated/%.jar
$(install-file)
$(EXPORT_INCLUDE_DIR)/%: $(CORE_BUILD_DIR)/../generated/jvmtifiles/%
$(install-file)
# Unix
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
$(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_SERVER_DIR)/%.debuginfo: $(CORE_BUILD_DIR)/%.debuginfo
$(install-file)
$(EXPORT_SERVER_DIR)/%.diz: $(CORE_BUILD_DIR)/%.diz
$(install-file)
endif
# Shark
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
# Common
@@ -531,6 +580,7 @@ clobber clean: clean_build clean_export clean_jdk
clean_build:
$(RM) -r $(C1_DIR)
$(RM) -r $(C2_DIR)
$(RM) -r $(CORE_DIR)
$(RM) -r $(ZERO_DIR)
$(RM) -r $(SHARK_DIR)
$(RM) -r $(MINIMAL1_DIR)
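The new core targets are driven like the existing C1/C2 ones (a hypothetical invocation; the boot JDK path is an assumption):

cd hotspot/make
make productcore ALT_BOOTDIR=/opt/jdk7      # core VM only, built under $(CORE_DIR)
make all_productcore ALT_BOOTDIR=/opt/jdk7  # core VM plus docs and export_product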

hotspot/make/aix/Makefile Normal file
View File

@@ -0,0 +1,377 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile creates a build tree and lights off a build.
# You can go back into the build tree and perform rebuilds or
# incremental builds as desired. Be sure to reestablish
# environment variable settings for LD_LIBRARY_PATH and JAVA_HOME.
# The make process now relies on java and javac. These can be
# specified either implicitly on the PATH, by setting the
# (JDK-inherited) ALT_BOOTDIR environment variable to the full path of a
# JDK in which bin/java and bin/javac are present and working (e.g.,
# /usr/local/java/jdk1.3/solaris), or via the (JDK-inherited)
# default BOOTDIR path value. Note that one of ALT_BOOTDIR
# or BOOTDIR has to be set. We do *not* search for javac, javah, rmic etc.
# on the PATH.
#
# One can set ALT_BOOTDIR or BOOTDIR to point to a jdk that runs on
# an architecture that differs from the target architecture, as long
# as the bootstrap jdk runs under the same flavor of OS as the target
# (i.e., if the target is linux, point to a jdk that runs on a linux
# box). In order to use such a bootstrap jdk, set the make variable
# REMOTE to the desired remote command mechanism, e.g.,
#
# make REMOTE="rsh -l me myotherlinuxbox"
# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
# JDI binding on SA produces two binaries:
# 1. sa-jdi.jar - This is built before building libjvm.so
# Please refer to ./makefiles/sa.make
# 2. libsa.so - Native library for SA - This is built after
# libjsig.so (signal interposition library)
# Please refer to ./makefiles/vm.make
# If $(GAMMADIR)/agent dir is not present, SA components are not built.
# No tests on AIX.
TEST_IN_BUILD=false
ifeq ($(GAMMADIR),)
include ../../make/defs.make
else
include $(GAMMADIR)/make/defs.make
endif
include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
ifndef CC_INTERP
ifndef FORCE_TIERED
FORCE_TIERED=1
endif
endif
ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
_JUNK_ := $(shell echo >&2 \
$(OSNAME) $(ARCH) "*** ERROR: this platform does not support 64-bit compilers!")
@exit 1
endif
endif
# we need to set up LP64 correctly to satisfy sanity checks in adlc
ifneq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
MFLAGS += " LP64=1 "
endif
# pass USE_SUNCC further, through MFLAGS
ifdef USE_SUNCC
MFLAGS += " USE_SUNCC=1 "
endif
# The following renders pathnames in generated Makefiles valid on
# machines other than the machine containing the build tree.
#
# For example, let's say my build tree lives on /files12 on
# exact.east.sun.com. This logic will cause GAMMADIR to begin with
# /net/exact/files12/...
#
# We only do this on SunOS variants, for a couple of reasons:
# * It is extremely rare that source trees exist on other systems
# * It has been claimed that the Linux automounter is flakey, so
# changing GAMMADIR in a way that exercises the automounter could
# prove to be a source of unreliability in the build process.
# Obviously, this Makefile is only relevant on SunOS boxes to begin
# with, but the SunOS conditionalization will make it easier to
# combine Makefiles in the future (assuming we ever do that).
ifeq ($(OSNAME),solaris)
# prepend current directory to relative pathnames.
NEW_GAMMADIR := \
$(shell echo $(GAMMADIR) | \
sed -e "s=^\([^/].*\)=$(shell pwd)/\1=" \
)
unexport NEW_GAMMADIR
# If NEW_GAMMADIR doesn't already start with "/net/":
ifeq ($(strip $(filter /net/%,$(NEW_GAMMADIR))),)
# prepend /net/$(HOST)
# remove /net/$(HOST) if name already began with /home/
# remove /net/$(HOST) if name already began with /java/
# remove /net/$(HOST) if name already began with /lab/
NEW_GAMMADIR := \
$(shell echo $(NEW_GAMMADIR) | \
sed -e "s=^\(.*\)=/net/$(HOST)\1=" \
-e "s=^/net/$(HOST)/home/=/home/=" \
-e "s=^/net/$(HOST)/java/=/java/=" \
-e "s=^/net/$(HOST)/lab/=/lab/=" \
)
# Don't use the new value for GAMMADIR unless a file with the new
# name actually exists.
ifneq ($(wildcard $(NEW_GAMMADIR)),)
GAMMADIR := $(NEW_GAMMADIR)
endif
endif
endif
# BUILDARCH is set to "zero" for Zero builds. VARIANTARCH
# is used to give the build directories meaningful names.
VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
# There is a (semi-) regular correspondence between make targets and actions:
#
# Target Tree Type Build Dir
#
# debug compiler2 <os>_<arch>_compiler2/debug
# fastdebug compiler2 <os>_<arch>_compiler2/fastdebug
# optimized compiler2 <os>_<arch>_compiler2/optimized
# product compiler2 <os>_<arch>_compiler2/product
#
# debug1 compiler1 <os>_<arch>_compiler1/debug
# fastdebug1 compiler1 <os>_<arch>_compiler1/fastdebug
# optimized1 compiler1 <os>_<arch>_compiler1/optimized
# product1 compiler1 <os>_<arch>_compiler1/product
#
# debugcore core <os>_<arch>_core/debug
# fastdebugcore core <os>_<arch>_core/fastdebug
# optimizedcore core <os>_<arch>_core/optimized
# productcore core <os>_<arch>_core/product
#
# debugzero zero <os>_<arch>_zero/debug
# fastdebugzero zero <os>_<arch>_zero/fastdebug
# optimizedzero zero <os>_<arch>_zero/optimized
# productzero zero <os>_<arch>_zero/product
#
# debugshark shark <os>_<arch>_shark/debug
# fastdebugshark shark <os>_<arch>_shark/fastdebug
# optimizedshark shark <os>_<arch>_shark/optimized
# productshark shark <os>_<arch>_shark/product
#
# fastdebugminimal1 minimal1 <os>_<arch>_minimal1/fastdebug
# productminimal1 minimal1 <os>_<arch>_minimal1/product
#
# What you get with each target:
#
# debug* - debug compile with asserts enabled
# fastdebug* - optimized compile, but with asserts enabled
# optimized* - optimized compile, no asserts
# product* - the shippable thing: optimized compile, no asserts, -DPRODUCT
# This target list needs to be coordinated with the usage message
# in the build.sh script:
TARGETS = debug fastdebug optimized product
ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
SUBDIR_DOCS = $(OSNAME)_$(VARIANTARCH)_docs
else
SUBDIR_DOCS = $(OSNAME)_$(BUILDARCH)_docs
endif
SUBDIRS_C1 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS))
SUBDIRS_C2 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS))
SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
SUBDIRS_ZERO = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
SUBDIRS_SHARK = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
SUBDIRS_MINIMAL1 = $(addprefix $(OSNAME)_$(BUILDARCH)_minimal1/,$(TARGETS))
TARGETS_C2 = $(TARGETS)
TARGETS_C1 = $(addsuffix 1,$(TARGETS))
TARGETS_TIERED = $(addsuffix tiered,$(TARGETS))
TARGETS_CORE = $(addsuffix core,$(TARGETS))
TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
TARGETS_SHARK = $(addsuffix shark,$(TARGETS))
TARGETS_MINIMAL1 = $(addsuffix minimal1,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
BUILDTREE_VARS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
#-------------------------------------------------------------------------------
# Could make everything by default, but that would take a while.
all:
@echo "Try '$(MAKE) <target> ...' where <target> is one or more of"
@echo " $(TARGETS_C2)"
@echo " $(TARGETS_C1)"
@echo " $(TARGETS_CORE)"
@echo " $(TARGETS_ZERO)"
@echo " $(TARGETS_SHARK)"
@echo " $(TARGETS_MINIMAL1)"
checks: check_os_version check_j2se_version
# We do not want people accidentally building on old systems (e.g. Linux 2.2.x,
# Solaris 2.5.1, 2.6).
# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok.
SUPPORTED_OS_VERSION = AIX
OS_VERSION := $(shell uname -a)
EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION))
check_os_version:
ifeq ($(DISABLE_HOTSPOT_OS_VERSION_CHECK)$(EMPTY_IF_NOT_SUPPORTED),)
$(QUIETLY) >&2 echo "*** This OS is not supported:" `uname -a`; exit 1;
endif
# jvmti.make requires XSLT (J2SE 1.4.x or newer):
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
if [ $$? -ne 0 ]; then \
$(REMOTE) $(RUN.JAVA) -version; \
echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
"to bootstrap this build" 1>&2; \
exit 1; \
fi
$(SUBDIRS_TIERED): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=tiered
$(SUBDIRS_C2): $(BUILDTREE_MAKE)
ifeq ($(FORCE_TIERED),1)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=tiered FORCE_TIERED=1
else
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=compiler2
endif
$(SUBDIRS_C1): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=compiler1
$(SUBDIRS_CORE): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=core
$(SUBDIRS_ZERO): $(BUILDTREE_MAKE) platform_zero
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=zero VARIANTARCH=$(VARIANTARCH)
$(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
$(SUBDIRS_MINIMAL1): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=minimal1
platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
# Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME
$(TARGETS_C2): $(SUBDIRS_C2)
cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_TIERED): $(SUBDIRS_TIERED)
cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_C1): $(SUBDIRS_C1)
cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_CORE): $(SUBDIRS_CORE)
cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_ZERO): $(SUBDIRS_ZERO)
cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_SHARK): $(SUBDIRS_SHARK)
cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_MINIMAL1): $(SUBDIRS_MINIMAL1)
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS)
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS) install
endif
# Just build the tree, and nothing else:
tree: $(SUBDIRS_C2)
tree1: $(SUBDIRS_C1)
treecore: $(SUBDIRS_CORE)
treezero: $(SUBDIRS_ZERO)
treeshark: $(SUBDIRS_SHARK)
treeminimal1: $(SUBDIRS_MINIMAL1)
# Doc target. This is the same for all build options.
# Hence create a docs directory beside ...$(ARCH)_[...]
# We specify 'BUILD_FLAVOR=product' so that the proper
# ENABLE_FULL_DEBUG_SYMBOLS value is used.
docs: checks
$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
# Synonyms for win32-like targets.
compiler2: debug product
compiler1: debug1 product1
core: debugcore productcore
zero: debugzero productzero
shark: debugshark productshark
clean_docs:
rm -rf $(SUBDIR_DOCS)
clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1:
rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_minimal1 clean_docs
include $(GAMMADIR)/make/cscope.make
#-------------------------------------------------------------------------------
.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK) $(TARGETS_MINIMAL1)
.PHONY: tree tree1 treecore treezero treeshark
.PHONY: all compiler1 compiler2 core zero shark
.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark docs clean_docs
.PHONY: checks check_os_version check_j2se_version
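Tying the target table above to concrete commands (a hypothetical session on an AIX/PPC64 box, where BUILDARCH=ppc64):

cd hotspot/make/aix
make productcore            # builds in aix_ppc64_core/product
make debugcore INSTALL=y    # debug core build, then installed into JAVA_HOME
make treecore               # only generate the aix_ppc64_core build tree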

View File

@@ -0,0 +1,20 @@
#! /bin/sh
#
# This file is used by adlc.make to selectively update generated
# adlc files. Because source and target directories are relative
# paths, this file is copied to the target build directory before
# use.
#
# adlc-updater <file> <source-dir> <target-dir>
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
mv $1+ $1
}
fix_lines $2/$1 $3/$1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
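A usage sketch (the file and directory names are hypothetical; the real ones are passed in by adlc.make):

# adlc-updater <file> <source-dir> <target-dir>
./adlc_updater ad_ppc_64.cpp mktmp12345 ../generated/adfiles
# If the two copies differ (or the target is missing), the fresh file is moved
# into place and made-change is touched so refresh_adfiles can report it.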

View File

@@ -0,0 +1,87 @@
#! /bin/sh
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This script is used only from top.make.
# The macro $(MFLAGS-adjusted) calls this script to
# adjust the "-j" arguments to take into account
# the HOTSPOT_BUILD_JOBS variable. The default
# handling of the "-j" argument by gnumake does
# not meet our needs, so we must adjust it ourselves.
# This argument adjustment applies to two recursive
# calls to "$(MAKE) $(MFLAGS-adjusted)" in top.make.
# One invokes adlc.make, and the other invokes vm.make.
# The adjustment propagates the desired concurrency
# level down to the sub-make (of the adlc or vm).
# The default behavior of gnumake is to run all
# sub-makes without concurrency ("-j1").
# Also, we use a make variable rather than an explicit
# "-j<N>" argument to control this setting, so that
# the concurrency setting (which must be tuned separately
# for each MP system) can be set via an environment variable.
# The recommended setting is 1.5x to 2x the number of available
# CPUs on the MP system, which is large enough to keep the CPUs
# busy (even though some jobs may be I/O bound) but not so large,
# we may presume, as to overflow the system's swap space.
set -eu
default_build_jobs=4
case $# in
[12]) true;;
*) >&2 echo "Usage: $0 mflags [hotspot_build_jobs]"; exit 2;;
esac
MFLAGS=$1
HOTSPOT_BUILD_JOBS=${2-}
# Normalize any -jN argument to the form " -j${HBJ}"
MFLAGS=`
echo "$MFLAGS" \
| sed '
s/^-/ -/
s/ -\([^ ][^ ]*\)j/ -\1 -j/
s/ -j[0-9][0-9]*/ -j/
s/ -j\([^ ]\)/ -j -\1/
s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
' `
case ${HOTSPOT_BUILD_JOBS} in \
'') case ${MFLAGS} in
*\ -j*)
>&2 echo "# Note: -jN is ineffective for setting parallelism in this makefile."
>&2 echo "# please set HOTSPOT_BUILD_JOBS=${default_build_jobs} in the command line or environment."
esac;;
?*) case ${MFLAGS} in
*\ -j*) true;;
*) MFLAGS="-j${HOTSPOT_BUILD_JOBS} ${MFLAGS}";;
esac;;
esac
echo "${MFLAGS}"

View File

@@ -0,0 +1,231 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (adlc.make) is included from the adlc.make in the
# build directories.
# It knows how to compile, link, and run the adlc.
include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/rules.make
# #########################################################################
# OUTDIR must be the same as AD_Dir = $(GENERATED)/adfiles in top.make:
GENERATED = ../generated
OUTDIR = $(GENERATED)/adfiles
ARCH = $(Platform_arch)
OS = $(Platform_os_family)
SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
ifeq ("${Platform_arch_model}", "${Platform_arch}")
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad)
else
SOURCES.AD = \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
$(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad)
endif
EXEC = $(OUTDIR)/adlc
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(GAMMADIR)/src/share/vm/adlc
VPATH += $(Src_Dirs_V:%=%:)
# set INCLUDES for C preprocessor
Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# set flags for adlc compilation
CXXFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CXXFLAGS += -DASSERT
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Suppress warnings (for now)
CFLAGS_WARN = -w
CFLAGS += $(CFLAGS_WARN)
OBJECTNAMES = \
adlparse.o \
archDesc.o \
arena.o \
dfa.o \
dict2.o \
filebuff.o \
forms.o \
formsopt.o \
formssel.o \
main.o \
adlc-opcodes.o \
output_c.o \
output_h.o \
OBJECTS = $(OBJECTNAMES:%=$(OUTDIR)/%)
GENERATEDNAMES = \
ad_$(Platform_arch_model).cpp \
ad_$(Platform_arch_model).hpp \
ad_$(Platform_arch_model)_clone.cpp \
ad_$(Platform_arch_model)_expand.cpp \
ad_$(Platform_arch_model)_format.cpp \
ad_$(Platform_arch_model)_gen.cpp \
ad_$(Platform_arch_model)_misc.cpp \
ad_$(Platform_arch_model)_peephole.cpp \
ad_$(Platform_arch_model)_pipeline.cpp \
adGlobals_$(Platform_arch_model).hpp \
dfa_$(Platform_arch_model).cpp \
GENERATEDFILES = $(GENERATEDNAMES:%=$(OUTDIR)/%)
# #########################################################################
all: $(EXEC)
$(EXEC) : $(OBJECTS)
@echo Making adlc
$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
# Random dependencies:
$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
# The source files refer to ostream.h, which sparcworks calls iostream.h
$(OBJECTS): ostream.h
ostream.h :
@echo >$@ '#include <iostream.h>'
dump:
: OUTDIR=$(OUTDIR)
: OBJECTS=$(OBJECTS)
: products = $(GENERATEDFILES)
all: $(GENERATEDFILES)
$(GENERATEDFILES): refresh_adfiles
# Get a unique temporary directory name, so multiple makes can run in parallel.
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
# Debuggable by default
CFLAGS += -g
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)
# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
# so skip it for 3.2 and earlier.
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
ADLCFLAGS += -g
endif
ifdef LP64
ADLCFLAGS += -D_LP64
else
ADLCFLAGS += -U_LP64
endif
#
# adlc_updater is a simple sh script, under sccs control. It is
# used to selectively update generated adlc files. This should
# provide a nice compilation speed improvement.
#
ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS)
ADLC_UPDATER = adlc_updater
$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER)
$(QUIETLY) cp $< $@; chmod +x $@
# This action refreshes all generated adlc files simultaneously.
# The way it works is this:
# 1) create a scratch directory to work in.
# 2) if the current working directory does not have $(ADLC_UPDATER), copy it.
# 3) run the compiled adlc executable. This will create new adlc files in the scratch directory.
# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files.
# 5) If we actually updated any files, echo a notice.
#
refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER)
@rm -rf $(TEMPDIR); mkdir $(TEMPDIR)
$(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \
-c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \
|| { rm -rf $(TEMPDIR); exit 1; }
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_clone.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_expand.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_format.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_gen.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_misc.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_peephole.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_pipeline.cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) adGlobals_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) ./$(ADLC_UPDATER) dfa_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
$(QUIETLY) [ -f $(TEMPDIR)/made-change ] \
|| echo "Rescanned $(SOURCE.AD) but encountered no changes."
$(QUIETLY) rm -rf $(TEMPDIR)
# #########################################################################
$(SOURCE.AD): $(SOURCES.AD)
$(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD)
#PROCESS_AD_FILES = cat
# Pass through #line directives, in case user enables -g option above:
PROCESS_AD_FILES = awk '{ \
if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \
if (need_lineno && $$0 !~ /\/\//) \
{ print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \
print }'
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# Some object files are given a prefix, to disambiguate
# them from objects of the same name built for the VM.
$(OUTDIR)/adlc-%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# #########################################################################
clean:
rm $(OBJECTS)
cleanall:
rm $(OBJECTS) $(EXEC)
# #########################################################################
.PHONY: all dump refresh_adfiles clean cleanall
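A toy run (hypothetical .ad fragments) of the PROCESS_AD_FILES filter above, showing the #line markers that let adlc -g diagnostics point back at the original .ad sources:

printf 'instruct foo() %%{ %%}\n' > a.ad    # hypothetical fragment
printf 'instruct bar() %%{ %%}\n' > b.ad
awk '{ if (CUR_FN != FILENAME) { CUR_FN = FILENAME; NR_BASE = NR - 1; need_lineno = 1 }
       if (need_lineno && $0 !~ /\/\//) { print "\n\n#line " (NR - NR_BASE) " \"" FILENAME "\""; need_lineno = 0 }
       print }' a.ad b.ad
# each fragment is preceded by '#line 1 "a.ad"' and '#line 1 "b.ad"' respectively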

View File

@@ -0,0 +1,18 @@
#!/bin/sh
# If we're cross compiling, use that path for nm
if [ "$CROSS_COMPILE_ARCH" != "" ]; then
NM=$ALT_COMPILER_PATH/nm
else
# On AIX we have to avoid picking up the 'nm' from GNU binutils, which may be
# installed under /opt/freeware/bin. So better use an absolute path here!
NM=/usr/bin/nm
fi
$NM -X64 -B -C $* \
| awk '{
if (($2="d" || $2="D") && ($3 ~ /^__vft/ || $3 ~ /^gHotSpotVM/)) print "\t" $3 ";"
if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
if ($3 ~ /^SharedArchivePath__9Arguments$/) print "\t" $3 ";"
}' \
| sort -u
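The intent, illustrated with a hypothetical nm line (not output from a real run; the script and object file names below are assumptions too):

# an nm -X64 -B line such as:   0000000110000000 D gHotSpotVMTypes
# passes the filter above and becomes a mapfile export entry:
#       gHotSpotVMTypes;
./build_vm_def.sh libjvm.o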

View File

@@ -0,0 +1,364 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Usage:
#
# $(MAKE) -f buildtree.make SRCARCH=srcarch BUILDARCH=buildarch LIBARCH=libarch
# GAMMADIR=dir OS_FAMILY=os VARIANT=variant
#
# The macros ARCH, GAMMADIR, OS_FAMILY and VARIANT must be defined in the
# environment or on the command-line:
#
# ARCH - sparc, i486, ... HotSpot cpu and os_cpu source directory
# BUILDARCH - build directory
# LIBARCH - the corresponding directory in JDK/JRE
# GAMMADIR - top of workspace
# OS_FAMILY - operating system
# VARIANT - core, compiler1, compiler2, or tiered
# HOTSPOT_RELEASE_VERSION - <major>.<minor>-b<nn> (11.0-b07)
# HOTSPOT_BUILD_VERSION - internal, internal-$(USER_RELEASE_SUFFIX) or empty
# JRE_RELEASE_VERSION - <major>.<minor>.<micro> (1.7.0)
#
# Builds the directory trees with makefiles plus some convenience files in
# each directory:
#
# Makefile - for "make foo"
# flags.make - with macro settings
# vm.make - to support making "$(MAKE) -v vm.make" in makefiles
# adlc.make -
# trace.make - generate tracing event and type definitions
# jvmti.make - generate JVMTI bindings from the spec (JSR-163)
# sa.make - generate SA jar file and natives
#
# The makefiles are split this way so that "make foo" will run faster by not
# having to read the dependency files for the vm.
-include $(SPEC)
include $(GAMMADIR)/make/scm.make
include $(GAMMADIR)/make/defs.make
include $(GAMMADIR)/make/altsrc.make
# 'gmake MAKE_VERBOSE=y' or 'gmake QUIETLY=' gives all the gory details.
QUIETLY$(MAKE_VERBOSE) = @
ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
PLATFORM_FILE = $(shell dirname $(shell dirname $(shell pwd)))/platform_zero
else
ifdef USE_SUNCC
PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
else
PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH)
endif
endif
# Allow overriding of the arch part of the directory but default
# to BUILDARCH if nothing is specified
ifeq ($(VARIANTARCH),)
VARIANTARCH=$(BUILDARCH)
endif
ifdef FORCE_TIERED
ifeq ($(VARIANT),tiered)
PLATFORM_DIR = $(OS_FAMILY)_$(VARIANTARCH)_compiler2
else
PLATFORM_DIR = $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
endif
else
PLATFORM_DIR = $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
endif
#
# We do two levels of exclusion in the shared directory.
# TOPLEVEL excludes are pruned, they are not recursively searched,
# but lower level directories can be named without fear of collision.
# ALWAYS excludes are excluded at any level in the directory tree.
#
ALWAYS_EXCLUDE_DIRS = $(SCM_DIRS)
ifeq ($(VARIANT),tiered)
TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name agent
else
ifeq ($(VARIANT),compiler2)
TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name c1 -o -name agent
else
# compiler1 and core use the same exclude list
TOPLEVEL_EXCLUDE_DIRS = $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name opto -o -name libadt -o -name agent
endif
endif
# Get things from the platform file.
COMPILER = $(shell sed -n 's/^compiler[ ]*=[ ]*//p' $(PLATFORM_FILE))
SIMPLE_DIRS = \
$(PLATFORM_DIR)/generated/dependencies \
$(PLATFORM_DIR)/generated/adfiles \
$(PLATFORM_DIR)/generated/jvmtifiles \
$(PLATFORM_DIR)/generated/tracefiles
TARGETS = debug fastdebug optimized product
SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
# For dependencies and recursive makes.
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
# Define variables to be set in flags.make.
# Default values are set in make/defs.make.
ifeq ($(HOTSPOT_BUILD_VERSION),)
HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)
else
HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)
endif
# Set BUILD_USER from system-dependent hints: $LOGNAME, $(whoami)
ifndef HOTSPOT_BUILD_USER
HOTSPOT_BUILD_USER := $(shell echo $$LOGNAME)
endif
ifndef HOTSPOT_BUILD_USER
HOTSPOT_BUILD_USER := $(shell whoami)
endif
# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro
# or make/hotspot_distro.
ifndef HOTSPOT_VM_DISTRO
ifeq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
include $(GAMMADIR)/make/hotspot_distro
else
include $(GAMMADIR)/make/openjdk_distro
endif
endif
# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
ifndef OPENJDK
ifneq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
OPENJDK=true
endif
endif
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
BUILDTREE = \
$(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS)
BUILDTREE_COMMENT = echo "\# Generated by $(BUILDTREE_MAKE)"
all: $(SUBMAKE_DIRS)
# Run make in each subdirectory recursively.
$(SUBMAKE_DIRS): $(SIMPLE_DIRS) FORCE
$(QUIETLY) [ -d $@ ] || { mkdir -p $@; }
$(QUIETLY) cd $@ && $(BUILDTREE) TARGET=$(@F)
$(QUIETLY) touch $@
$(SIMPLE_DIRS):
$(QUIETLY) mkdir -p $@
# Convenience macro which takes a source relative path, applies $(1) to the
# absolute path, and then replaces $(GAMMADIR) in the result with a
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/ppc64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \
sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
echo; \
echo "GAMMADIR = $(GAMMADIR)"; \
echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
echo "OSNAME = $(OSNAME)"; \
echo "SYSDEFS = \$$(Platform_sysdefs)"; \
echo "SRCARCH = $(SRCARCH)"; \
echo "BUILDARCH = $(BUILDARCH)"; \
echo "LIBARCH = $(LIBARCH)"; \
echo "TARGET = $(TARGET)"; \
echo "HS_BUILD_VER = $(HS_BUILD_VER)"; \
echo "JRE_RELEASE_VER = $(JRE_RELEASE_VERSION)"; \
echo "SA_BUILD_VERSION = $(HS_BUILD_VER)"; \
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
echo "TARGET_DEFINES += -DTARGET_ARCH_\$$(Platform_arch)"; \
echo "TARGET_DEFINES += -DTARGET_ARCH_MODEL_\$$(Platform_arch_model)"; \
echo "TARGET_DEFINES += -DTARGET_OS_ARCH_\$$(Platform_os_arch)"; \
echo "TARGET_DEFINES += -DTARGET_OS_ARCH_MODEL_\$$(Platform_os_arch_model)"; \
echo "TARGET_DEFINES += -DTARGET_COMPILER_\$$(Platform_compiler)"; \
echo "CFLAGS += \$$(TARGET_DEFINES)"; \
echo; \
echo "Src_Dirs_V = \\"; \
sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \
echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
echo; \
echo "Src_Dirs_I = \\"; \
echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \
echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
echo "$(call gamma-path,altsrc,share/vm) \\"; \
echo "$(call gamma-path,commonsrc,share/vm) \\"; \
echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
[ -n "$(CFLAGS_BROWSE)" ] && \
echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
[ -n "$(OBJCOPY)" ] && \
echo && echo "OBJCOPY = $(OBJCOPY)"; \
[ -n "$(STRIP_POLICY)" ] && \
echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
[ -n "$(ZIPEXE)" ] && \
echo && echo "ZIPEXE = $(ZIPEXE)"; \
[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
echo && \
echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
[ -n "$(INCLUDE_TRACE)" ] && \
echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
echo; \
[ -n "$(SPEC)" ] && \
echo "include $(SPEC)"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@
flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
) > $@
../shared_dirs.lst: $(BUILDTREE_MAKE) $(GAMMADIR)/src/share/vm
@echo Creating directory list $@
$(QUIETLY) if [ -d $(HS_ALT_SRC)/share/vm ]; then \
find $(HS_ALT_SRC)/share/vm/* -prune \
-type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
\( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; > $@; \
fi;
$(QUIETLY) find $(HS_COMMON_SRC)/share/vm/* -prune \
-type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
\( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; >> $@
Makefile: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/top.make"; \
) > $@
vm.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo include flags_vm.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
adlc.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
jvmti.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
trace.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
sa.make: $(BUILDTREE_MAKE)
@echo Creating $@ ...
$(QUIETLY) ( \
$(BUILDTREE_COMMENT); \
echo; \
echo include flags.make; \
echo; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
) > $@
FORCE:
.PHONY: all FORCE
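A concrete invocation matching the Usage block at the top of the file (all values hypothetical; make/aix/Makefile normally supplies them via $(BUILDTREE)):

make -f buildtree.make GAMMADIR=/ws/hotspot OS_FAMILY=aix \
     SRCARCH=ppc BUILDARCH=ppc64 LIBARCH=ppc64 VARIANT=compiler2
# creates aix_ppc64_compiler2/{debug,fastdebug,optimized,product}, each holding
# the generated Makefile, flags.make, flags_vm.make, vm.make, adlc.make,
# jvmti.make, trace.make and sa.make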

View File

@@ -0,0 +1,32 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making server version of VM
TYPE=COMPILER2
VM_SUBDIR = server
CFLAGS += -DCOMPILER2

View File

@@ -0,0 +1,33 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making core version of VM
# Select which files to use (in top.make)
TYPE=CORE
# There is no "core" directory in JDK. Install core build in server directory.
VM_SUBDIR = server
# Note: macros.hpp defines CORE

View File

@ -0,0 +1,41 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making debug version of VM
# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Set the environment variable HOTSPARC_GENERIC to "true"
# to inhibit the effect of the previous line on CFLAGS.
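# (Editor's sketch) How the BYFILE idiom above picks flags; the DEMO_* names
# are hypothetical and not used by the build:
#   DEMO_FLAGS/DEFAULT = -g
#   DEMO_FLAGS/foo.o   = -g0
#   DEMO_FLAGS/BYFILE  = $(DEMO_FLAGS/$@)$(DEMO_FLAGS/DEFAULT$(DEMO_FLAGS/$@))
# For target foo.o the second reference expands to DEMO_FLAGS/DEFAULT-g0, an
# undefined (empty) variable, so only the per-file -g0 survives; for any
# other target the per-file part is empty and DEMO_FLAGS/DEFAULT applies.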
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT

View File

@ -0,0 +1,231 @@
#
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# The common definitions for hotspot AIX builds.
# Include the top level defs.make under make directory instead of this one.
# This file is included into make/defs.make.
SLASH_JAVA ?= /java
# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
#ARCH:=$(shell uname -m)
PATH_SEP = :
ifeq ($(LP64), 1)
ARCH_DATA_MODEL ?= 64
else
ARCH_DATA_MODEL ?= 32
endif
ifeq ($(ARCH_DATA_MODEL), 64)
ARCH = ppc64
else
ARCH = ppc
endif
# PPC
ifeq ($(ARCH), ppc)
#ARCH_DATA_MODEL = 32
PLATFORM = aix-ppc
VM_PLATFORM = aix_ppc
HS_ARCH = ppc
endif
# PPC64
ifeq ($(ARCH), ppc64)
#ARCH_DATA_MODEL = 64
MAKE_ARGS += LP64=1
PLATFORM = aix-ppc64
VM_PLATFORM = aix_ppc64
HS_ARCH = ppc
endif
# On 32 bit AIX we build server and client, on 64 bit just server.
ifeq ($(JVM_VARIANTS),)
ifeq ($(ARCH_DATA_MODEL), 32)
JVM_VARIANTS:=client,server
JVM_VARIANT_CLIENT:=true
JVM_VARIANT_SERVER:=true
else
JVM_VARIANTS:=server
JVM_VARIANT_SERVER:=true
endif
endif
# determine if HotSpot is being built in JDK6 or earlier version
JDK6_OR_EARLIER=0
ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
# if the longer variable names (newer build style) are set, then check those
ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
JDK6_OR_EARLIER=1
endif
else
# the longer variables aren't set so check the shorter variable names
ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
JDK6_OR_EARLIER=1
endif
endif
ifeq ($(JDK6_OR_EARLIER),0)
# Full Debug Symbols is supported on JDK7 or newer.
# The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
# builds is enabled with debug info files ZIP'ed to save space. For
# BUILD_FLAVOR != product builds, FDS is always enabled, after all a
# debug build without debug info isn't very useful.
# The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
#
# If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
# disabled for a BUILD_FLAVOR == product build.
#
# Note: Use of a different variable name for the FDS override option
# versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
# versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
# in options via environment variables, use of distinct variables
# prevents strange behaviours. For example, in a BUILD_FLAVOR !=
# product build, the FULL_DEBUG_SYMBOLS environment variable will be
# 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
# the same variable name is used, then different values can be picked
# up by different parts of the build. Just to be clear, we only need
# two variable names because the incoming option value can be
# overridden in some situations, e.g., a BUILD_FLAVOR != product
# build.
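# (Editor's note) Concretely, a product build opting out of FDS would be
# invoked along the lines of
#   make BUILD_FLAVOR=product FULL_DEBUG_SYMBOLS=0 ...
# and the checks below then leave ENABLE_FULL_DEBUG_SYMBOLS at 0.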
# Due to the multiple sub-make processes that occur this logic gets
# executed multiple times. We reduce the noise by at least checking that
# BUILD_FLAVOR has been set.
ifneq ($(BUILD_FLAVOR),)
ifeq ($(BUILD_FLAVOR), product)
FULL_DEBUG_SYMBOLS ?= 1
ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
else
# debug variants always get Full Debug Symbols (if available)
ENABLE_FULL_DEBUG_SYMBOLS = 1
endif
_JUNK_ := $(shell \
echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
# since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# Default OBJCOPY comes from GNU Binutils on Linux
ifeq ($(CROSS_COMPILE_ARCH),)
DEF_OBJCOPY=/usr/bin/objcopy
else
# Assume objcopy is part of the cross-compilation toolset
ifneq ($(ALT_COMPILER_PATH),)
DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
endif
endif
OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
ifneq ($(ALT_OBJCOPY),)
_JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
endif
ifeq ($(OBJCOPY),)
_JUNK_ := $(shell \
echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files. You may need to set ALT_OBJCOPY.")
ENABLE_FULL_DEBUG_SYMBOLS=0
_JUNK_ := $(shell \
echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
else
_JUNK_ := $(shell \
echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
# Library stripping policies for .debuginfo configs:
# all_strip - strips everything from the library
# min_strip - strips most stuff from the library; leaves minimum symbols
# no_strip - does not strip the library at all
#
# Oracle security policy requires "all_strip". A waiver was granted on
# 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
#
# Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
#
STRIP_POLICY ?= min_strip
_JUNK_ := $(shell \
echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
ZIP_DEBUGINFO_FILES ?= 1
_JUNK_ := $(shell \
echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
endif
endif # ENABLE_FULL_DEBUG_SYMBOLS=1
endif # BUILD_FLAVOR
endif # JDK6_OR_EARLIER
# unused JDK_INCLUDE_SUBDIR=aix
# Library suffix
LIBRARY_SUFFIX=so
EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
# else
# EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
# endif
#endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
# ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
# else
# EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
# endif
# endif
endif
ifeq ($(JVM_VARIANT_CLIENT),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
# ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
# else
# EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
# endif
# endif
endif
# Serviceability Binaries
# No SA Support for PPC or zero
ADD_SA_BINARIES/ppc =
ADD_SA_BINARIES/ppc64 =
ADD_SA_BINARIES/zero =
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))

View File

@ -0,0 +1,27 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Linux does not build jvm_db
LIBJVM_DB =

View File

@ -0,0 +1,73 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making debug version of VM
# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
# Pare down optimization to -O2 if xlC V10.1 is in use.
OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) $(QV10_OPT_CONSERVATIVE)
OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ($(BUILDARCH), ia64)
# Bug in GCC, causes hang. -O1 will override the -O3 specified earlier
OPT_CFLAGS/callGenerator.o += -O1
OPT_CFLAGS/ciTypeFlow.o += -O1
OPT_CFLAGS/compile.o += -O1
OPT_CFLAGS/concurrentMarkSweepGeneration.o += -O1
OPT_CFLAGS/doCall.o += -O1
OPT_CFLAGS/generateOopMap.o += -O1
OPT_CFLAGS/generateOptoStub.o += -O1
OPT_CFLAGS/graphKit.o += -O1
OPT_CFLAGS/instanceKlass.o += -O1
OPT_CFLAGS/interpreterRT_ia64.o += -O1
OPT_CFLAGS/output.o += -O1
OPT_CFLAGS/parse1.o += -O1
OPT_CFLAGS/runtime.o += -O1
OPT_CFLAGS/synchronizer.o += -O1
endif
# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Set the environment variable HOTSPARC_GENERIC to "true"
# to inhibit the effect of the previous line on CFLAGS.
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
# xlc 10.1 parameters for ipa linkage.
# - remove ipa linkage altogether. Does not seem to benefit performance,
# but increases code footprint.
# - this is a debug build in the end. Extra effort for ipa linkage is thus
# not justified.
LFLAGS_QIPA=
G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG
PICFLAGS = DEFAULT

View File

@ -0,0 +1,87 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Rules to build signal interposition library, used by vm.make
# libjsig.so: signal interposition library
JSIG = jsig
LIBJSIG = lib$(JSIG).so
LIBJSIG_DEBUGINFO = lib$(JSIG).debuginfo
LIBJSIG_DIZ = lib$(JSIG).diz
JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG)
DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
DEST_JSIG_DIZ = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
# On Linux we really don't want a mapfile, as this library is small
# and preloaded using LD_PRELOAD, making functions private will
# cause problems with interposing. See CR: 6466665
# LFLAGS_JSIG += $(MAPFLAG:FILENAME=$(LIBJSIG_MAPFILE))
LFLAGS_JSIG += -D_GNU_SOURCE -D_REENTRANT $(LDFLAGS_HASH_STYLE)
LFLAGS_JSIG += $(BIN_UTILS)
# DEBUG_BINARIES overrides everything, use full -g debug information
ifeq ($(DEBUG_BINARIES), true)
JSIG_DEBUG_CFLAGS = -g
endif
$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo Making signal interposition lib...
$(QUIETLY) $(CXX) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
# ifeq ($(STRIP_POLICY),all_strip)
# $(QUIETLY) $(STRIP) $@
# else
# ifeq ($(STRIP_POLICY),min_strip)
# $(QUIETLY) $(STRIP) -g $@
# # implied else here is no stripping at all
# endif
# endif
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# $(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
# $(RM) $(LIBJSIG_DEBUGINFO)
# endif
#endif
install_jsig: $(LIBJSIG)
@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
.PHONY: install_jsig

View File

@ -0,0 +1,118 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (jvmti.make) is included from the jvmti.make in the
# build directories.
#
# It knows how to build and run the tools to generate jvmti.
include $(GAMMADIR)/make/aix/makefiles/rules.make
# #########################################################################
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
JvmtiOutDir = $(GENERATED)/jvmtifiles
JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims
InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(JvmtiSrcDir)
VPATH += $(Src_Dirs_V:%=%:)
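# (Editor's note) The substitution reference above rewrites each directory in
# Src_Dirs_V as "dir:", so the appended words concatenate into a
# colon-separated search path.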
JvmtiGeneratedNames = \
jvmtiEnv.hpp \
jvmtiEnter.cpp \
jvmtiEnterTrace.cpp \
jvmtiEnvRecommended.cpp \
bytecodeInterpreterWithChecks.cpp \
jvmti.h
JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java
JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class
JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java
JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class
JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%)
XSLT = $(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
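# (Editor's note) A typical expansion of the XSLT macro, using the jvmti.h
# rule below as the example (paths abbreviated):
#   java -classpath $(JvmtiOutDir) jvmtiGen \
#        -IN .../jvmti.xml -XSL .../jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h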
.PHONY: all jvmtidocs clean cleanall
# #########################################################################
all: $(JvmtiGeneratedFiles)
both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl
$(JvmtiGenClass): $(JvmtiGenSource)
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
$(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti
$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl
@echo Generating $@
$(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp
$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace
$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass)
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp
$(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp $(JvmtiOutDir)/jvmtiEnvRecommended.cpp
$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp
$(JvmtiOutDir)/jvmti.h: $(both) $(JvmtiSrcDir)/jvmtiH.xsl
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h
jvmtidocs: $(JvmtiOutDir)/jvmti.html
$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl
@echo Generating $@
$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html
# #########################################################################
clean :
rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
cleanall :
rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
# #########################################################################

View File

@ -0,0 +1,274 @@
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Define public interface.
SUNWprivate_1.1 {
global:
# JNI
JNI_CreateJavaVM;
JNI_GetCreatedJavaVMs;
JNI_GetDefaultJavaVMInitArgs;
# JVM
JVM_Accept;
JVM_ActiveProcessorCount;
JVM_AllocateNewArray;
JVM_AllocateNewObject;
JVM_ArrayCopy;
JVM_AssertionStatusDirectives;
JVM_Available;
JVM_Bind;
JVM_ClassDepth;
JVM_ClassLoaderDepth;
JVM_Clone;
JVM_Close;
JVM_CX8Field;
JVM_CompileClass;
JVM_CompileClasses;
JVM_CompilerCommand;
JVM_Connect;
JVM_ConstantPoolGetClassAt;
JVM_ConstantPoolGetClassAtIfLoaded;
JVM_ConstantPoolGetDoubleAt;
JVM_ConstantPoolGetFieldAt;
JVM_ConstantPoolGetFieldAtIfLoaded;
JVM_ConstantPoolGetFloatAt;
JVM_ConstantPoolGetIntAt;
JVM_ConstantPoolGetLongAt;
JVM_ConstantPoolGetMethodAt;
JVM_ConstantPoolGetMethodAtIfLoaded;
JVM_ConstantPoolGetMemberRefInfoAt;
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
JVM_CurrentThread;
JVM_CurrentTimeMillis;
JVM_DefineClass;
JVM_DefineClassWithSource;
JVM_DefineClassWithSourceCond;
JVM_DesiredAssertionStatus;
JVM_DisableCompiler;
JVM_DoPrivileged;
JVM_DTraceGetVersion;
JVM_DTraceActivate;
JVM_DTraceIsProbeEnabled;
JVM_DTraceIsSupported;
JVM_DTraceDispose;
JVM_DumpAllStacks;
JVM_DumpThreads;
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;
JVM_FindPrimitiveClass;
JVM_FindSignal;
JVM_FreeMemory;
JVM_GC;
JVM_GetAllThreads;
JVM_GetArrayElement;
JVM_GetArrayLength;
JVM_GetCPClassNameUTF;
JVM_GetCPFieldClassNameUTF;
JVM_GetCPFieldModifiers;
JVM_GetCPFieldNameUTF;
JVM_GetCPFieldSignatureUTF;
JVM_GetCPMethodClassNameUTF;
JVM_GetCPMethodModifiers;
JVM_GetCPMethodNameUTF;
JVM_GetCPMethodSignatureUTF;
JVM_GetCallerClass;
JVM_GetClassAccessFlags;
JVM_GetClassAnnotations;
JVM_GetClassCPEntriesCount;
JVM_GetClassCPTypes;
JVM_GetClassConstantPool;
JVM_GetClassContext;
JVM_GetClassDeclaredConstructors;
JVM_GetClassDeclaredFields;
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;
JVM_GetClassNameUTF;
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldAnnotations;
JVM_GetFieldIxModifiers;
JVM_GetFieldTypeAnnotations;
JVM_GetHostName;
JVM_GetInheritedAccessControlContext;
JVM_GetInterfaceVersion;
JVM_GetLastErrorString;
JVM_GetManagement;
JVM_GetMethodAnnotations;
JVM_GetMethodDefaultAnnotationValue;
JVM_GetMethodIxArgsSize;
JVM_GetMethodIxByteCode;
JVM_GetMethodIxByteCodeLength;
JVM_GetMethodIxExceptionIndexes;
JVM_GetMethodIxExceptionTableEntry;
JVM_GetMethodIxExceptionTableLength;
JVM_GetMethodIxExceptionsCount;
JVM_GetMethodIxLocalsCount;
JVM_GetMethodIxMaxStack;
JVM_GetMethodIxModifiers;
JVM_GetMethodIxNameUTF;
JVM_GetMethodIxSignatureUTF;
JVM_GetMethodParameterAnnotations;
JVM_GetMethodParameters;
JVM_GetMethodTypeAnnotations;
JVM_GetPrimitiveArrayElement;
JVM_GetProtectionDomain;
JVM_GetSockName;
JVM_GetSockOpt;
JVM_GetStackAccessControlContext;
JVM_GetStackTraceDepth;
JVM_GetStackTraceElement;
JVM_GetSystemPackage;
JVM_GetSystemPackages;
JVM_GetThreadStateNames;
JVM_GetThreadStateValues;
JVM_GetVersionInfo;
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InitializeCompiler;
JVM_InitializeSocketLibrary;
JVM_InternString;
JVM_Interrupt;
JVM_InvokeMethod;
JVM_IsArrayClass;
JVM_IsConstructorIx;
JVM_IsInterface;
JVM_IsInterrupted;
JVM_IsNaN;
JVM_IsPrimitiveClass;
JVM_IsSameClassPackage;
JVM_IsSilentCompiler;
JVM_IsSupportedJNIVersion;
JVM_IsThreadAlive;
JVM_IsVMGeneratedMethodIx;
JVM_LatestUserDefinedLoader;
JVM_Listen;
JVM_LoadClass0;
JVM_LoadLibrary;
JVM_Lseek;
JVM_MaxObjectInspectionAge;
JVM_MaxMemory;
JVM_MonitorNotify;
JVM_MonitorNotifyAll;
JVM_MonitorWait;
JVM_NanoTime;
JVM_NativePath;
JVM_NewArray;
JVM_NewInstanceFromConstructor;
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;
JVM_RawMonitorEnter;
JVM_RawMonitorExit;
JVM_Read;
JVM_Recv;
JVM_RecvFrom;
JVM_RegisterSignal;
JVM_ReleaseUTF;
JVM_ResolveClass;
JVM_ResumeThread;
JVM_Send;
JVM_SendTo;
JVM_SetArrayElement;
JVM_SetClassSigners;
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;
JVM_SetThreadPriority;
JVM_Sleep;
JVM_Socket;
JVM_SocketAvailable;
JVM_SocketClose;
JVM_SocketShutdown;
JVM_StartThread;
JVM_StopThread;
JVM_SuspendThread;
JVM_SupportsCX8;
JVM_Sync;
JVM_Timeout;
JVM_TotalMemory;
JVM_TraceInstructions;
JVM_TraceMethodCalls;
JVM_UnloadLibrary;
JVM_Write;
JVM_Yield;
JVM_handle_linux_signal;
# debug JVM
JVM_AccessVMBooleanFlag;
JVM_AccessVMIntFlag;
JVM_VMBreakPoint;
# miscellaneous functions
jio_fprintf;
jio_printf;
jio_snprintf;
jio_vfprintf;
jio_vsnprintf;
fork1;
numa_warn;
numa_error;
# Needed because there is no JVM interface for this.
sysThreadAvailableStackWithSlack;
# This is for Forte Analyzer profiling support.
AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local:
*;
};
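# (Editor's note) A version script of this shape exports exactly the symbols
# listed under "global:" and hides everything else via the "local: *;"
# catch-all. A minimal example with a hypothetical library:
#   MYLIB_1.0 {
#     global: my_public_entry;
#     local:  *;
#   };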

View File

@ -0,0 +1,38 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Define library interface.
SUNWprivate_1.1 {
global:
JVM_begin_signal_setting;
JVM_end_signal_setting;
JVM_get_libjsig_version;
JVM_get_signal_action;
sigaction;
signal;
sigset;
local:
*;
};

View File

@ -0,0 +1,267 @@
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Define public interface.
SUNWprivate_1.1 {
global:
# JNI
JNI_CreateJavaVM;
JNI_GetCreatedJavaVMs;
JNI_GetDefaultJavaVMInitArgs;
# JVM
JVM_Accept;
JVM_ActiveProcessorCount;
JVM_AllocateNewArray;
JVM_AllocateNewObject;
JVM_ArrayCopy;
JVM_AssertionStatusDirectives;
JVM_Available;
JVM_Bind;
JVM_ClassDepth;
JVM_ClassLoaderDepth;
JVM_Clone;
JVM_Close;
JVM_CX8Field;
JVM_CompileClass;
JVM_CompileClasses;
JVM_CompilerCommand;
JVM_Connect;
JVM_ConstantPoolGetClassAt;
JVM_ConstantPoolGetClassAtIfLoaded;
JVM_ConstantPoolGetDoubleAt;
JVM_ConstantPoolGetFieldAt;
JVM_ConstantPoolGetFieldAtIfLoaded;
JVM_ConstantPoolGetFloatAt;
JVM_ConstantPoolGetIntAt;
JVM_ConstantPoolGetLongAt;
JVM_ConstantPoolGetMethodAt;
JVM_ConstantPoolGetMethodAtIfLoaded;
JVM_ConstantPoolGetMemberRefInfoAt;
JVM_ConstantPoolGetSize;
JVM_ConstantPoolGetStringAt;
JVM_ConstantPoolGetUTF8At;
JVM_CountStackFrames;
JVM_CurrentClassLoader;
JVM_CurrentLoadedClass;
JVM_CurrentThread;
JVM_CurrentTimeMillis;
JVM_DefineClass;
JVM_DefineClassWithSource;
JVM_DefineClassWithSourceCond;
JVM_DesiredAssertionStatus;
JVM_DisableCompiler;
JVM_DoPrivileged;
JVM_DTraceGetVersion;
JVM_DTraceActivate;
JVM_DTraceIsProbeEnabled;
JVM_DTraceIsSupported;
JVM_DTraceDispose;
JVM_DumpAllStacks;
JVM_DumpThreads;
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;
JVM_FindPrimitiveClass;
JVM_FindSignal;
JVM_FreeMemory;
JVM_GC;
JVM_GetAllThreads;
JVM_GetArrayElement;
JVM_GetArrayLength;
JVM_GetCPClassNameUTF;
JVM_GetCPFieldClassNameUTF;
JVM_GetCPFieldModifiers;
JVM_GetCPFieldNameUTF;
JVM_GetCPFieldSignatureUTF;
JVM_GetCPMethodClassNameUTF;
JVM_GetCPMethodModifiers;
JVM_GetCPMethodNameUTF;
JVM_GetCPMethodSignatureUTF;
JVM_GetCallerClass;
JVM_GetClassAccessFlags;
JVM_GetClassAnnotations;
JVM_GetClassCPEntriesCount;
JVM_GetClassCPTypes;
JVM_GetClassConstantPool;
JVM_GetClassContext;
JVM_GetClassDeclaredConstructors;
JVM_GetClassDeclaredFields;
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;
JVM_GetClassNameUTF;
JVM_GetClassSignature;
JVM_GetClassSigners;
JVM_GetClassTypeAnnotations;
JVM_GetComponentType;
JVM_GetDeclaredClasses;
JVM_GetDeclaringClass;
JVM_GetEnclosingMethodInfo;
JVM_GetFieldAnnotations;
JVM_GetFieldIxModifiers;
JVM_GetHostName;
JVM_GetInheritedAccessControlContext;
JVM_GetInterfaceVersion;
JVM_GetLastErrorString;
JVM_GetManagement;
JVM_GetMethodAnnotations;
JVM_GetMethodDefaultAnnotationValue;
JVM_GetMethodIxArgsSize;
JVM_GetMethodIxByteCode;
JVM_GetMethodIxByteCodeLength;
JVM_GetMethodIxExceptionIndexes;
JVM_GetMethodIxExceptionTableEntry;
JVM_GetMethodIxExceptionTableLength;
JVM_GetMethodIxExceptionsCount;
JVM_GetMethodIxLocalsCount;
JVM_GetMethodIxMaxStack;
JVM_GetMethodIxModifiers;
JVM_GetMethodIxNameUTF;
JVM_GetMethodIxSignatureUTF;
JVM_GetMethodParameterAnnotations;
JVM_GetMethodParameters;
JVM_GetPrimitiveArrayElement;
JVM_GetProtectionDomain;
JVM_GetSockName;
JVM_GetSockOpt;
JVM_GetStackAccessControlContext;
JVM_GetStackTraceDepth;
JVM_GetStackTraceElement;
JVM_GetSystemPackage;
JVM_GetSystemPackages;
JVM_GetThreadStateNames;
JVM_GetThreadStateValues;
JVM_GetVersionInfo;
JVM_Halt;
JVM_HoldsLock;
JVM_IHashCode;
JVM_InitAgentProperties;
JVM_InitProperties;
JVM_InitializeCompiler;
JVM_InitializeSocketLibrary;
JVM_InternString;
JVM_Interrupt;
JVM_InvokeMethod;
JVM_IsArrayClass;
JVM_IsConstructorIx;
JVM_IsInterface;
JVM_IsInterrupted;
JVM_IsNaN;
JVM_IsPrimitiveClass;
JVM_IsSameClassPackage;
JVM_IsSilentCompiler;
JVM_IsSupportedJNIVersion;
JVM_IsThreadAlive;
JVM_IsVMGeneratedMethodIx;
JVM_LatestUserDefinedLoader;
JVM_Listen;
JVM_LoadClass0;
JVM_LoadLibrary;
JVM_Lseek;
JVM_MaxObjectInspectionAge;
JVM_MaxMemory;
JVM_MonitorNotify;
JVM_MonitorNotifyAll;
JVM_MonitorWait;
JVM_NanoTime;
JVM_NativePath;
JVM_NewArray;
JVM_NewInstanceFromConstructor;
JVM_NewMultiArray;
JVM_OnExit;
JVM_Open;
JVM_RaiseSignal;
JVM_RawMonitorCreate;
JVM_RawMonitorDestroy;
JVM_RawMonitorEnter;
JVM_RawMonitorExit;
JVM_Read;
JVM_Recv;
JVM_RecvFrom;
JVM_RegisterSignal;
JVM_ReleaseUTF;
JVM_ResolveClass;
JVM_ResumeThread;
JVM_Send;
JVM_SendTo;
JVM_SetArrayElement;
JVM_SetClassSigners;
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;
JVM_SetThreadPriority;
JVM_Sleep;
JVM_Socket;
JVM_SocketAvailable;
JVM_SocketClose;
JVM_SocketShutdown;
JVM_StartThread;
JVM_StopThread;
JVM_SuspendThread;
JVM_SupportsCX8;
JVM_Sync;
JVM_Timeout;
JVM_TotalMemory;
JVM_TraceInstructions;
JVM_TraceMethodCalls;
JVM_UnloadLibrary;
JVM_Write;
JVM_Yield;
JVM_handle_linux_signal;
# miscellaneous functions
jio_fprintf;
jio_printf;
jio_snprintf;
jio_vfprintf;
jio_vsnprintf;
fork1;
numa_warn;
numa_error;
# Needed because there is no JVM interface for this.
sysThreadAvailableStackWithSlack;
# This is for Forte Analyzer profiling support.
AsyncGetCallTrace;
# INSERT VTABLE SYMBOLS HERE
local:
*;
};

View File

@ -0,0 +1,94 @@
#
# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Produce 64 bits object files.
CFLAGS += -q64
# Balanced tuning for recent versions of the POWER architecture (if supported by xlc).
QTUNE=$(if $(CXX_SUPPORTS_BALANCED_TUNING),balanced,pwr5)
# Try to speed up the interpreter: use ppc64 instructions and inline
# glue code for external functions.
OPT_CFLAGS += -qarch=ppc64 -qtune=$(QTUNE) -qinlglue
# We need variable length arrays
CFLAGS += -qlanglvl=c99vla
# Just to check for unwanted macro redefinitions
CFLAGS += -qlanglvl=noredefmac
# Suppress those "implicit private" warnings xlc gives.
# - The omitted keyword "private" is assumed for base class "...".
CFLAGS += -qsuppress=1540-0198
# Suppress the following numerous warning:
# - 1540-1090 (I) The destructor of "..." might not be called.
# - 1500-010: (W) WARNING in ...: Infinite loop. Program may not stop.
# There are several infinite loops in the vm, suppress.
CFLAGS += -qsuppress=1540-1090 -qsuppress=1500-010
# Suppress
# - 1540-1088 (W) The exception specification is being ignored.
# caused by throw() in declaration of new() in nmethod.hpp.
CFLAGS += -qsuppress=1540-1088
# Turn off floating-point optimizations that may alter program semantics
OPT_CFLAGS += -qstrict
# Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp
# and sharedRuntimeTrans.cpp on ppc64.
# -qstrict turns off the following optimizations:
# * Performing code motion and scheduling on computations such as loads
# and floating-point computations that may trigger an exception.
# * Relaxing conformance to IEEE rules.
# * Reassociating floating-point expressions.
# When using '-qstrict' there still remains one problem
# in javasoft.sqe.tests.api.java.lang.Math.sin5Tests when run in compile-all
# mode, so don't optimize sharedRuntimeTrig.cpp at all.
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
# xlc 10.1 parameters for ipa compile.
QIPA_COMPILE=$(if $(CXX_IS_V10),-qipa)
# Xlc 10.1 parameters for aggressive optimization:
# - qhot=level=1: Most aggressive loop optimizations.
# - qignerrno: Assume errno is not modified by system calls.
# - qinline: Inline method calls. No suboptions for c++ compiles.
# - qxflag=ASMMIDCOALFIX: Activate fix for -O3 problem in interpreter loop.
# - qxflag=asmfastsync: Activate fix for performance problem with inline assembler with memory clobber.
QV10_OPT=$(if $(CXX_IS_V10),-qxflag=ASMMIDCOALFIX -qxflag=asmfastsync)
QV10_OPT_AGGRESSIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
QV10_OPT_CONSERVATIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
# Disallow inlining for synchronizer.cpp, but perform O3 optimizations.
OPT_CFLAGS/synchronizer.o = $(OPT_CFLAGS) -qnoinline
# Set all the xlC V10.1 options here.
OPT_CFLAGS += $(QIPA_COMPILE) $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
export OBJECT_MODE=64
# Also build launcher as 64 bit executable.
LAUNCHERFLAGS += -q64

View File

@ -0,0 +1,58 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Sets make macros for making optimized version of Gamma VM
# (This is the "product", not the "release" version.)
# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS)
OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Set the environment variable HOTSPARC_GENERIC to "true"
# to inhibit the effect of the previous line on CFLAGS.
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-product
# Remove ipa linkage altogether. Does not seem to benefit performance, but increases code footprint.
LFLAGS_QIPA=
SYSDEFS += -DPRODUCT
VERSION = optimized
# use -g to strip library as -x will discard its symbol table; -x is fine for
# executables.
# Note: these macros are not used in .debuginfo configs
STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
STRIP_AOUT = $(STRIP) -x $@ || exit 1;
# If we can create .debuginfo files, then the VM is stripped in vm.make
# and this macro is not used.
# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))

View File

@ -0,0 +1,203 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Common rules/macros for the vm, adlc.
# Tell make that .cpp is important
.SUFFIXES: .cpp $(SUFFIXES)
DEMANGLER = c++filt
DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
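# (Editor's note) DEMANGLE pipes the target through c++filt into a hidden
# temporary (.$@) and moves it back over the original, demangling the
# generated assembly in place.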
# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS)
CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS)
AS.S = $(AS) $(ASFLAGS)
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.CC = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CXX = $(CXX_COMPILE) -c
GENASM.CXX = $(CXX_COMPILE) -S
LINK.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CXX = $(CXX) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CXX = $(CXX_COMPILE) -E
# cross compiling the jvm with c2 requires host compilers to build
# adlc tool
HOST.CXX_COMPILE = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
HOST.COMPILE.CXX = $(HOST.CXX_COMPILE) -c
HOST.LINK_NOPROF.CXX = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
REMOVE_TARGET = rm -f $@
# Note use of ALT_BOOTDIR to explicitly specify location of java and
# javac; this is the same environment variable used in the J2SE build
# process for overriding the default spec, which is BOOTDIR.
# Note also that we fall back to using JAVA_HOME if neither of these is
# specified.
ifdef ALT_BOOTDIR
RUN.JAVA = $(ALT_BOOTDIR)/bin/java
RUN.JAVAP = $(ALT_BOOTDIR)/bin/javap
RUN.JAVAH = $(ALT_BOOTDIR)/bin/javah
RUN.JAR = $(ALT_BOOTDIR)/bin/jar
COMPILE.JAVAC = $(ALT_BOOTDIR)/bin/javac
COMPILE.RMIC = $(ALT_BOOTDIR)/bin/rmic
BOOT_JAVA_HOME = $(ALT_BOOTDIR)
else
ifdef BOOTDIR
RUN.JAVA = $(BOOTDIR)/bin/java
RUN.JAVAP = $(BOOTDIR)/bin/javap
RUN.JAVAH = $(BOOTDIR)/bin/javah
RUN.JAR = $(BOOTDIR)/bin/jar
COMPILE.JAVAC = $(BOOTDIR)/bin/javac
COMPILE.RMIC = $(BOOTDIR)/bin/rmic
BOOT_JAVA_HOME = $(BOOTDIR)
else
ifdef JAVA_HOME
RUN.JAVA = $(JAVA_HOME)/bin/java
RUN.JAVAP = $(JAVA_HOME)/bin/javap
RUN.JAVAH = $(JAVA_HOME)/bin/javah
RUN.JAR = $(JAVA_HOME)/bin/jar
COMPILE.JAVAC = $(JAVA_HOME)/bin/javac
COMPILE.RMIC = $(JAVA_HOME)/bin/rmic
BOOT_JAVA_HOME = $(JAVA_HOME)
else
# take from the PATH, if ALT_BOOTDIR, BOOTDIR and JAVA_HOME are not defined
# note that this is to support hotspot build without SA. To build
# SA along with hotspot, you need to define ALT_BOOTDIR, BOOTDIR or JAVA_HOME
RUN.JAVA = java
RUN.JAVAP = javap
RUN.JAVAH = javah
RUN.JAR = jar
COMPILE.JAVAC = javac
COMPILE.RMIC = rmic
endif
endif
endif
COMPILE.JAVAC += $(BOOTSTRAP_JAVAC_FLAGS)
SUM = /usr/bin/sum
# 'gmake MAKE_VERBOSE=y' gives all the gory details.
QUIETLY$(MAKE_VERBOSE) = @
RUN.JAR$(MAKE_VERBOSE) += >/dev/null
# Settings for javac
BOOT_SOURCE_LANGUAGE_VERSION = 6
BOOT_TARGET_CLASS_VERSION = 6
JAVAC_FLAGS = -g -encoding ascii
BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
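# (Editor's note) With the defaults above, the bootstrap compile line
# expands to: javac -g -encoding ascii -source 6 -target 6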
# With parallel makes, print a message at the end of compilation.
ifeq ($(findstring j,$(MFLAGS)),j)
COMPILE_DONE = && { echo Done with $<; }
endif
# Include $(NONPIC_OBJ_FILES) definition
ifndef LP64
include $(GAMMADIR)/make/pic.make
endif
include $(GAMMADIR)/make/altsrc.make
# The non-PIC object files are only generated for 32 bit platforms.
ifdef LP64
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
else
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
$(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
endif
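# (Editor's note) The $(if $(findstring ...)) above compiles targets listed
# in NONPIC_OBJ_FILES with VM_PICFLAG stripped out of COMPILE.CXX (i.e.
# non-PIC) and everything else with the ordinary PIC compile command.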
%.o: %.s
@echo Assembling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(AS.S) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
%.s: %.cpp
@echo Generating assembly for $<
$(QUIETLY) $(GENASM.CXX) -o $@ $<
$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
# Intermediate files (for debugging macros)
%.i: %.cpp
@echo Preprocessing $< to $@
$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
# Override gnumake built-in rules which do sccs get operations badly.
# (They put the checked out code in the current directory, not in the
# directory of the original file.) Since this is a symptom of a Teamware
# failure, and since not all problems can be detected by gnumake due
# to incomplete dependency checking... just complain and stop.
%:: s.%
@echo "========================================================="
@echo File $@
@echo is out of date with respect to its SCCS file.
@echo This file may be from an unresolved Teamware conflict.
@echo This is also a symptom of a Teamware bringover/putback failure
@echo in which SCCS files are updated but not checked out.
@echo Check for other out of date files in your workspace.
@echo "========================================================="
@exit 666
%:: SCCS/s.%
@echo "========================================================="
@echo File $@
@echo is out of date with respect to its SCCS file.
@echo This file may be from an unresolved Teamware conflict.
@echo This is also a symptom of a Teamware bringover/putback failure
@echo in which SCCS files are updated but not checked out.
@echo Check for other out of date files in your workspace.
@echo "========================================================="
@exit 666
.PHONY: default

View File

@ -0,0 +1,116 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (sa.make) is included from the sa.make in the
# build directories.
# This makefile is used to build Serviceability Agent java code
# and generate JNI header file for native methods.
include $(GAMMADIR)/make/aix/makefiles/rules.make
include $(GAMMADIR)/make/defs.make
AGENT_DIR = $(GAMMADIR)/agent
include $(GAMMADIR)/make/sa.files
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
# tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# TODO: if it's a modules image, check if SA module is installed.
MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
AGENT_FILES_LIST := $(GENERATED)/agent.classes.list
SA_CLASSDIR = $(GENERATED)/saclasses
SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)"
SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
# if $(AGENT_DIR) does not exist, we don't build SA
# also, we don't build SA on Itanium, PowerPC, ARM or zero.
all:
if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
-a "$(SRCARCH)" != "arm" \
-a "$(SRCARCH)" != "ppc" \
-a "$(SRCARCH)" != "zero" ] ; then \
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
$(QUIETLY) echo "Making $@"
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
echo ""; \
exit 1; \
fi
$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
mkdir -p $(SA_CLASSDIR); \
fi
# Note: When indented, make tries to execute the '$(shell' comment.
# In some environments, cmd processors have limited line length.
# To prevent the javac invocation in the next block from using
# a very long cmd line, we use javac's @file-list option. We
# generate the file lists using make's built-in 'foreach' control
# flow which also avoids cmd processor line length issues. Since
# the 'foreach' is done as part of make's macro expansion phase,
# the initialization of the lists is also done in the same phase
# using '$(shell rm ...' instead of using the more traditional
# 'rm ...' rule.
$(shell rm -rf $(AGENT_FILES_LIST))
# gnumake 3.78.1 does not accept the *'s that
# are in AGENT_FILES, so use the shell to expand them.
# Be extra careful not to produce overly long command lines in the shell!
$(foreach file,$(AGENT_FILES),$(shell ls -1 $(file) >> $(AGENT_FILES_LIST)))
$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES_LIST)
$(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
$(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext
$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sparc.SPARCThreadContext
clean:
rm -rf $(SA_CLASSDIR)
rm -rf $(GENERATED)/sa-jdi.jar
rm -rf $(AGENT_FILES_LIST)

View File

@ -0,0 +1,117 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
include $(GAMMADIR)/make/defs.make
# Rules to build serviceability agent library, used by vm.make
# libsaproc.so: serviceability agent
SAPROC = saproc
LIBSAPROC = lib$(SAPROC).so
LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo
LIBSAPROC_DIZ = lib$(SAPROC).diz
AGENT_DIR = $(GAMMADIR)/agent
SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
SASRCFILES = $(SASRCDIR)/salibelf.c \
$(SASRCDIR)/symtab.c \
$(SASRCDIR)/libproc_impl.c \
$(SASRCDIR)/ps_proc.c \
$(SASRCDIR)/ps_core.c \
$(SASRCDIR)/LinuxDebuggerLocal.c
SAMAPFILE = $(SASRCDIR)/mapfile
DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
DEST_SAPROC_DIZ = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
# DEBUG_BINARIES overrides everything, use full -g debug information
ifeq ($(DEBUG_BINARIES), true)
SA_DEBUG_CFLAGS = -g
endif
# if $(AGENT_DIR) does not exist, we don't build SA
# also, we don't build SA on Itanium, PPC, ARM or zero.
ifneq ($(wildcard $(AGENT_DIR)),)
ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
BUILDLIBSAPROC = $(LIBSAPROC)
endif
endif
SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
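# (Editor's note) MAPFLAG, set by the compiler makefiles, contains the
# literal token FILENAME (e.g. "-Xlinker --version-script=FILENAME" with gcc;
# shown for illustration); the substitution reference above replaces it with
# the mapfile path.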
$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
@echo Making SA debugger back-end...
$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE \
-D_FILE_OFFSET_BITS=64 \
$(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(BIN_UTILS) \
-I$(SASRCDIR) \
-I$(GENERATED) \
-I$(BOOT_JAVA_HOME)/include \
-I$(BOOT_JAVA_HOME)/include/$(Platform_os_family) \
$(SASRCFILES) \
$(SA_LFLAGS) \
$(SA_DEBUG_CFLAGS) \
-o $@ \
-lthread_db
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
ifeq ($(STRIP_POLICY),all_strip)
$(QUIETLY) $(STRIP) $@
else
ifeq ($(STRIP_POLICY),min_strip)
$(QUIETLY) $(STRIP) -g $@
# implied else here is no stripping at all
endif
endif
ifeq ($(ZIP_DEBUGINFO_FILES),1)
$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
$(RM) $(LIBSAPROC_DEBUGINFO)
endif
endif
install_saproc: $(BUILDLIBSAPROC)
$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \
echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"; \
test -f $(LIBSAPROC_DEBUGINFO) && \
cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
test -f $(LIBSAPROC_DIZ) && \
cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"; \
fi
.PHONY: install_saproc

View File

@ -0,0 +1,144 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# top.make is included in the Makefile in the build directories.
# It DOES NOT include the vm dependency info in order to be faster.
# Its main job is to implement the incremental form of make lists.
# It also:
# -builds and runs adlc via adlc.make
# -generates JVMTI source and docs via jvmti.make (JSR-163)
# -generates sa-jdi.jar (JDI binding to core files)
# It assumes the following flags are set:
# CFLAGS Platform_file, Src_Dirs_I, Src_Dirs_V, SYSDEFS, AOUT, Obj_Files
# -- D. Ungar (5/97) from a file by Bill Bush
# Don't override the built-in $(MAKE).
# Instead, use "gmake" (or "gnumake") from the command line. --Rose
#MAKE = gmake
include $(GAMMADIR)/make/altsrc.make
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
VM = $(GAMMADIR)/src/share/vm
Plat_File = $(Platform_file)
CDG = cd $(GENERATED);
ifneq ($(USE_PRECOMPILED_HEADER),0)
UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS)
else
UpdatePCH = \# precompiled header is not used
endif
Cached_plat = $(GENERATED)/platform.current
AD_Dir = $(GENERATED)/adfiles
ADLC = $(AD_Dir)/adlc
AD_Spec = $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad)
AD_Src = $(call altsrc-replace,$(HS_COMMON_SRC)/share/vm/adlc)
AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
AD_Files = $(AD_Names:%=$(AD_Dir)/%)
# AD_Files_If_Required/COMPILER1 = ad_stuff
AD_Files_If_Required/COMPILER2 = ad_stuff
AD_Files_If_Required/TIERED = ad_stuff
AD_Files_If_Required = $(AD_Files_If_Required/$(TYPE))
# Weird argument adjustment for "gnumake -j..."
adjust-mflags = $(GENERATED)/adjust-mflags
MFLAGS-adjusted = -r `$(adjust-mflags) "$(MFLAGS)" "$(HOTSPOT_BUILD_JOBS)"`
# default target: update lists, make vm
# done in stages to force sequential order with parallel make
#
default: vm_build_preliminaries the_vm
@echo All done.
# This is an explicit dependency for the sake of parallel makes.
vm_build_preliminaries: checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
@# We need a null action here, so implicit rules don't get consulted.
$(Cached_plat): $(Plat_File)
$(CDG) cp $(Plat_File) $(Cached_plat)
# make AD files as necessary
ad_stuff: $(Cached_plat) $(adjust-mflags)
@$(MAKE) -f adlc.make $(MFLAGS-adjusted)
# generate JVMTI files from the spec
jvmti_stuff: $(Cached_plat) $(adjust-mflags)
@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
# generate trace files
trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
@$(MAKE) -f trace.make $(MFLAGS-adjusted)
# generate SA jar files and native header
sa_stuff:
@$(MAKE) -f sa.make $(MFLAGS-adjusted)
# and the VM: must use other makefile with dependencies included
# We have to go to great lengths to get control over the -jN argument
# to the recursive invocation of vm.make. The problem is that gnumake
# resets -jN to -j1 for recursive runs. (How helpful.)
# Note that the user must specify the desired parallelism level via a
# command-line or environment variable named HOTSPOT_BUILD_JOBS.
$(adjust-mflags): $(GAMMADIR)/make/$(Platform_os_family)/makefiles/adjust-mflags.sh
@+rm -f $@ $@+
@+cat $< > $@+
@+chmod +x $@+
@+mv $@+ $@
the_vm: vm_build_preliminaries $(adjust-mflags)
@$(UpdatePCH)
@$(MAKE) -f vm.make $(MFLAGS-adjusted)
install gamma: the_vm
@$(MAKE) -f vm.make $@
# next rules support "make foo.[ois]"
%.o %.i %.s:
$(UpdatePCH)
$(MAKE) -f vm.make $(MFLAGS) $@
#$(MAKE) -f vm.make $@
# this should force everything to be rebuilt
clean:
rm -f $(GENERATED)/*.class
$(MAKE) -f vm.make $(MFLAGS) clean
# just in case it doesn't, this should do it
realclean:
$(MAKE) -f vm.make $(MFLAGS) clean
rm -fr $(GENERATED)
.PHONY: default vm_build_preliminaries
.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean
.PHONY: checks check_os_version install

View File

@ -0,0 +1,120 @@
#
# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# This makefile (trace.make) is included from the trace.make in the
# build directories.
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/linux/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
JvmtiOutDir = $(GENERATED)/jvmtifiles
TraceOutDir = $(GENERATED)/tracefiles
TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
# set VPATH so make knows where to look for source files
Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
ifneq ($(INCLUDE_TRACE), false)
TraceGeneratedNames += traceProducer.cpp
endif
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
endif
.PHONY: all clean cleanall
# #########################################################################
all: $(TraceGeneratedFiles)
GENERATE_CODE= \
$(QUIETLY) echo Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
test -f $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)

View File

@ -0,0 +1,377 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Rules to build JVM and related libraries, included from vm.make in the build
# directory.
# Common build rules.
MAKEFILES_DIR=$(GAMMADIR)/make/$(Platform_os_family)/makefiles
include $(MAKEFILES_DIR)/rules.make
include $(GAMMADIR)/make/altsrc.make
default: build
#----------------------------------------------------------------------
# Defs
GENERATED = ../generated
DEP_DIR = $(GENERATED)/dependencies
# reads the generated files defining the set of .o's and the .o .h dependencies
-include $(DEP_DIR)/*.d
# read machine-specific adjustments (%%% should do this via buildtree.make?)
ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
include $(MAKEFILES_DIR)/zeroshark.make
else
include $(MAKEFILES_DIR)/$(BUILDARCH).make
endif
# set VPATH so make knows where to look for source files
# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
# The adfiles directory contains ad_<arch>.[ch]pp.
# The jvmtifiles directory contains jvmti*.[ch]pp
Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
VPATH += $(Src_Dirs_V:%=%:)
# set INCLUDES for C preprocessor.
Src_Dirs_I += $(GENERATED)
# The order is important for the precompiled headers to work.
INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
# SYMFLAG is used by {jsig,saproc}.make
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# always build with debug info when we can create .debuginfo files
SYMFLAG = -g
else
ifeq (${VERSION}, debug)
SYMFLAG = -g
else
SYMFLAG =
endif
endif
# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
# in $(GAMMADIR)/make/defs.make
ifeq ($(HOTSPOT_BUILD_VERSION),)
BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
else
BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)\""
endif
# The following variables are defined in the generated flags.make file.
BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HS_BUILD_VER)\""
JRE_VERSION = -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\""
HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
CXXFLAGS = \
${SYSDEFS} \
${INCLUDES} \
${BUILD_VERSION} \
${BUILD_TARGET} \
${BUILD_USER} \
${HS_LIB_ARCH} \
${VM_DISTRO}
# This is VERY important! The version define must only be supplied to vm_version.o.
# Otherwise ccache will not reuse the cache at all, since the version string might
# contain a time and date.
vm_version.o: CXXFLAGS += ${JRE_VERSION}
CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
# File specific flags
CXXFLAGS += $(CXXFLAGS/BYFILE)
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN/BYFILE)
# Do not use C++ exception handling
CFLAGS += $(CFLAGS/NOEX)
# Extra flags from gnumake's invocation or environment
CFLAGS += $(EXTRA_CFLAGS)
LFLAGS += $(EXTRA_CFLAGS)
# Don't set executable bit on stack segment
# the same could be done by separate execstack command
#LFLAGS += -Xlinker -z -Xlinker noexecstack
LIBS += -lm -ldl -lpthread
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
#----------------------------------------------------------------------
# jvm_db & dtrace
include $(MAKEFILES_DIR)/dtrace.make
#----------------------------------------------------------------------
# JVM
JVM = jvm
LIBJVM = lib$(JVM).so
CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
LIBJVM_DEBUGINFO = lib$(JVM).debuginfo
LIBJVM_DIZ = lib$(JVM).diz
SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
SOURCE_PATHS=\
$(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
\( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(SRCARCH)/vm
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_family)_$(SRCARCH)/vm
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
ifneq ($(INCLUDE_TRACE), false)
CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
find $(HS_ALT_SRC)/share/vm/jfr -type d; \
fi)
endif
COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
COMPILER2_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/opto)
COMPILER2_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/libadt)
COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/opto
COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
COMPILER2_PATHS += $(GENERATED)/adfiles
SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
# Include dirs per type.
Src_Dirs/CORE := $(CORE_PATHS)
Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
Src_Dirs/ZERO := $(CORE_PATHS)
Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS)
Src_Dirs := $(Src_Dirs/$(TYPE))
COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
COMPILER1_SPECIFIC_FILES := c1_\*
SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero
# Always exclude these.
Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
Src_Files_EXCLUDE/TIERED := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
Src_Files_EXCLUDE/ZERO := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
Src_Files_EXCLUDE/SHARK := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
Src_Files_EXCLUDE += $(Src_Files_EXCLUDE/$(TYPE))
# Disable ELF decoder on AIX (AIX uses XCOFF).
Src_Files_EXCLUDE += decoder_elf.cpp elfFile.cpp elfStringTable.cpp elfSymbolTable.cpp elfFuncDescTable.cpp
# Special handling of arch model.
ifeq ($(Platform_arch_model), x86_32)
Src_Files_EXCLUDE += \*x86_64\*
endif
ifeq ($(Platform_arch_model), x86_64)
Src_Files_EXCLUDE += \*x86_32\*
endif
# Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
define findsrc
$(notdir $(shell find $(1)/. ! -name . -prune \
-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
endef
Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
Obj_Files = $(sort $(addsuffix .o,$(basename $(Src_Files))))
JVM_OBJ_FILES = $(Obj_Files)
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
mapfile : $(MAPFILE) vm.def
rm -f $@
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
{ system ("cat vm.def"); } \
else \
{ print $$0 } \
}' > $@ < $(MAPFILE)
mapfile_reorder : mapfile $(REORDERFILE)
rm -f $@
cat $^ > $@
vm.def: $(Res_Files) $(Obj_Files)
sh $(GAMMADIR)/make/aix/makefiles/build_vm_def.sh *.o > $@
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
STATIC_CXX = false
else
ifeq ($(ZERO_LIBARCH), ppc64)
STATIC_CXX = false
else
STATIC_CXX = true
endif
endif
ifeq ($(LINK_INTO),AOUT)
LIBJVM.o =
LIBJVM_MAPFILE =
LIBS_VM = $(LIBS)
else
LIBJVM.o = $(JVM_OBJ_FILES)
LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
# xlC_r ignores the -o= syntax
# LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
# JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
# get around library dependency and compatibility issues. Must use gcc not
# g++ to link.
LIBS_VM += $(STATIC_STDCXX) $(LIBS)
endif
LINK_VM = $(LINK_LIB.CXX)
# create loadmap for libjvm.so by default. Helps in diagnosing some problems.
LFLAGS_VM += -bloadmap:libjvm.loadmap
# rule for building precompiled header
$(PRECOMPILED_HEADER):
$(QUIETLY) echo Generating precompiled header $@
$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
# making the library:
ifneq ($(JVM_BASE_ADDR),)
# By default shared library is linked at base address == 0. Modify the
# linker script if JVM prefers a different base location. It can also be
# implemented with 'prelink -r'. But 'prelink' is not (yet) available on
# our build platform (AS-2.1).
LD_SCRIPT = libjvm.so.lds
$(LD_SCRIPT): $(LIBJVM_MAPFILE)
$(QUIETLY) { \
rm -rf $@; \
$(LINK_VM) -Wl,--verbose $(LFLAGS_VM) 2>&1 | \
sed -e '/^======/,/^======/!d' \
-e '/^======/d' \
-e 's/0\( + SIZEOF_HEADERS\)/$(JVM_BASE_ADDR)\1/' \
> $@; \
}
LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT)
endif
# With more recent Red Hat releases (or cutting-edge Fedora), if SELinux is
# configured to be enabled, the runtime linker will fail to apply the text
# relocation to libjvm.so, considering that it is built as a non-PIC DSO.
# To work around that, we run chcon on libjvm.so after it is built. See
# details in bug 6538311.
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(QUIETLY) { \
echo Linking vm...; \
$(LINK_LIB.CXX/PRE_HOOK) \
$(LINK_VM) $(LD_SCRIPT_FLAG) \
$(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM); \
$(LINK_LIB.CXX/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
}
# No security contexts on AIX
# if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then \
# if [ -x /usr/sbin/selinuxenabled ] ; then \
# /usr/sbin/selinuxenabled; \
# if [ $$? = 0 ] ; then \
# /usr/bin/chcon -t textrel_shlib_t $@; \
# if [ $$? != 0 ]; then \
# echo "ERROR: Cannot chcon $@"; \
# fi \
# fi \
# fi \
# fi \
# }
#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
# $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
# $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
# ifeq ($(STRIP_POLICY),all_strip)
# $(QUIETLY) $(STRIP) $@
# else
# ifeq ($(STRIP_POLICY),min_strip)
# $(QUIETLY) $(STRIP) -g $@
# # implied else here is no stripping at all
# endif
# endif
# ifeq ($(ZIP_DEBUGINFO_FILES),1)
# $(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
# $(RM) $(LIBJVM_DEBUGINFO)
# endif
#endif
DEST_SUBDIR = $(JDK_LIBDIR)/$(VM_SUBDIR)
DEST_JVM = $(DEST_SUBDIR)/$(LIBJVM)
DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
DEST_JVM_DIZ = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
install_jvm: $(LIBJVM)
@echo "Copying $(LIBJVM) to $(DEST_JVM)"
$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
$(QUIETLY) test -f $(LIBJVM_DIZ) && \
cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
#----------------------------------------------------------------------
# Other files
# Signal interposition library
include $(MAKEFILES_DIR)/jsig.make
# Serviceability agent
include $(MAKEFILES_DIR)/saproc.make
#----------------------------------------------------------------------
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
install: install_jvm install_jsig install_saproc
.PHONY: default build install install_jvm

View File

@ -0,0 +1,159 @@
#
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2013 SAP. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
#------------------------------------------------------------------------
# CC, CXX & AS
# Set compiler explicitly
CXX = $(COMPILER_PATH)xlC_r
CC = $(COMPILER_PATH)xlc_r
HOSTCXX = $(CXX)
HOSTCC = $(CC)
AS = $(CC) -c
# get xlc version
CXX_VERSION := $(shell $(CXX) -qversion 2>&1 | sed -n 's/.*Version: \([0-9.]*\)/\1/p')
# xlc 08.00.0000.0023 and higher supports -qtune=balanced
CXX_SUPPORTS_BALANCED_TUNING=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 080000000023 ] ; then echo "true" ; fi)
# xlc 10.01 is used with aggressive optimizations to boost performance
CXX_IS_V10=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 100100000000 ] ; then echo "true" ; fi)
# check for precompiled headers support
# Switch off precompiled header support. Neither xlC 8.0 nor xlC 10.0
# supports precompiled headers. Both "understand" the command line switches "-qusepcomp" and
# "-qgenpcomp" but when we specify them the following message is printed:
# "1506-755 (W) The -qusepcomp option is not supported in this release."
USE_PRECOMPILED_HEADER = 0
ifneq ($(USE_PRECOMPILED_HEADER),0)
PRECOMPILED_HEADER_DIR=.
PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
endif
#------------------------------------------------------------------------
# Compiler flags
# position-independent code
PICFLAG = -qpic=large
VM_PICFLAG/LIBJVM = $(PICFLAG)
VM_PICFLAG/AOUT =
VM_PICFLAG = $(VM_PICFLAG/$(LINK_INTO))
CFLAGS += $(VM_PICFLAG)
CFLAGS += -qnortti
CFLAGS += -qnoeh
CFLAGS += -D_REENTRANT
# no xlc counterpart for -fcheck-new
# CFLAGS += -fcheck-new
ARCHFLAG = -q64
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
LFLAGS += $(ARCHFLAG)
ASFLAGS += $(ARCHFLAG)
# Use C++ Interpreter
ifdef CC_INTERP
CFLAGS += -DCC_INTERP
endif
# Keep temporary files (.ii, .s)
# no counterpart on xlc for -save-temps, -pipe
# Do not treat compiler warnings as errors
# WARNINGS_ARE_ERRORS = -Werror
# Except for a few acceptable ones
# ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
# CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
CFLAGS_WARN/COMMON =
CFLAGS_WARN/DEFAULT = $(CFLAGS_WARN/COMMON) $(EXTRA_WARNINGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# The flags to use for an optimized build
OPT_CFLAGS += -O3
# HotSpot relies on non-strict aliasing; turn this optimization off
OPT_CFLAGS += -qalias=noansi
OPT_CFLAGS/NOOPT=-qnoopt
DEPFLAGS = -qmakedep=gcc -MF $(DEP_DIR)/$(@:%=%.d)
#------------------------------------------------------------------------
# Linker flags
# Statically link libstdc++.so; works with gcc but is ignored by g++
STATIC_STDCXX = -Wl,-lC_r
# Enable linker optimization
# no counterpart on xlc for this
# LFLAGS += -Xlinker -O1
# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
# MAPFLAG = -Xlinker --version-script=FILENAME
# Build shared library
SHARED_FLAG = -q64 -b64 -bexpall -G -bnoentry -qmkshrobj -brtl -bnolibpath
#------------------------------------------------------------------------
# Debug flags
# Always compile with '-g' to get symbols in the stacktraces in the hs_err file
DEBUG_CFLAGS += -g
FASTDEBUG_CFLAGS += -g
OPT_CFLAGS += -g
# DEBUG_BINARIES overrides everything, use full -g debug information
ifeq ($(DEBUG_BINARIES), true)
DEBUG_CFLAGS = -g
CFLAGS += $(DEBUG_CFLAGS)
endif
# If we are building HEADLESS, pass on to VM
# so it can set the java.awt.headless property
ifdef HEADLESS
CFLAGS += -DHEADLESS
endif
# When building Embedded for a small device,
# favor code space over speed
ifdef MINIMIZE_RAM_USAGE
CFLAGS += -DMINIMIZE_RAM_USAGE
endif
ifdef CROSS_COMPILE_ARCH
STRIP = $(ALT_COMPILER_PATH)/strip
else
STRIP = strip
endif

View File

@ -0,0 +1,17 @@
os_family = aix
arch = ppc
arch_model = ppc_64
os_arch = aix_ppc
os_arch_model = aix_ppc_64
lib_arch = ppc64
compiler = xlc
gnu_dis_arch = ppc64
sysdefs = -DAIX -DPPC64

View File

@ -260,7 +260,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@ -289,7 +289,7 @@ CFLAGS += -fno-strict-aliasing
# The flags to use for an Optimized g++ build
ifeq ($(OS_VENDOR), Darwin)
# use -Os by default, unless -O3 can be proved to be worth the cost, as per policy
# <http://wikis.sun.com/display/OpenJDK/Mac+OS+X+Port+Compilers>
# <https://wiki.openjdk.java.net/display/MacOSXPort/Compiler+Errata>
OPT_CFLAGS_DEFAULT ?= SIZE
else
OPT_CFLAGS_DEFAULT ?= SPEED

View File

@ -176,11 +176,15 @@ ifeq ($(OS),)
HOST := $(shell uname -n)
endif
# If not SunOS, not Linux and not BSD, assume Windows
# If not SunOS, not Linux, not BSD and not AIX, assume Windows
ifneq ($(OS), Linux)
ifneq ($(OS), SunOS)
ifneq ($(OS), bsd)
OSNAME=windows
ifneq ($(OS), AIX)
OSNAME=windows
else
OSNAME=aix
endif
else
OSNAME=bsd
endif
@ -269,7 +273,7 @@ ifneq ($(OSNAME),windows)
# Use uname output for SRCARCH, but deal with platform differences. If ARCH
# is not explicitly listed below, it is treated as x86.
SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
SRCARCH = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
ARCH/ = x86
ARCH/sparc = sparc
ARCH/sparc64= sparc
@ -295,6 +299,11 @@ ifneq ($(OSNAME),windows)
BUILDARCH = sparcv9
endif
endif
ifeq ($(BUILDARCH), ppc)
ifdef LP64
BUILDARCH = ppc64
endif
endif
# LIBARCH is 1:1 mapping from BUILDARCH
LIBARCH = $(LIBARCH/$(BUILDARCH))
@ -303,12 +312,12 @@ ifneq ($(OSNAME),windows)
LIBARCH/sparc = sparc
LIBARCH/sparcv9 = sparcv9
LIBARCH/ia64 = ia64
LIBARCH/ppc64 = ppc
LIBARCH/ppc64 = ppc64
LIBARCH/ppc = ppc
LIBARCH/arm = arm
LIBARCH/zero = $(ZERO_LIBARCH)
LP64_ARCH = sparcv9 amd64 ia64 zero
LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
endif
# Required make macro settings for all platforms

View File

@ -193,6 +193,7 @@ DATA_MODE/i486 = 32
DATA_MODE/sparc = 32
DATA_MODE/sparcv9 = 64
DATA_MODE/amd64 = 64
DATA_MODE/ppc64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))

View File

@ -120,6 +120,15 @@ ifeq ($(ARCH), ppc)
HS_ARCH = ppc
endif
# PPC64
ifeq ($(ARCH), ppc64)
ARCH_DATA_MODEL = 64
MAKE_ARGS += LP64=1
PLATFORM = linux-ppc64
VM_PLATFORM = linux_ppc64
HS_ARCH = ppc
endif
# On 32 bit linux we build server and client, on 64 bit just server.
ifeq ($(JVM_VARIANTS),)
ifeq ($(ARCH_DATA_MODEL), 32)
@ -255,7 +264,7 @@ EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)

View File

@ -181,6 +181,7 @@ ARCHFLAG/zero = $(ZERO_ARCHFLAG)
ifndef E500V2
ARCHFLAG/ppc = -mcpu=powerpc
endif
ARCHFLAG/ppc64 = -m64
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
@ -214,7 +215,7 @@ ifeq ($(USE_CLANG), true)
WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
endif
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
ifeq ($(USE_CLANG),)
# Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@ -346,6 +347,7 @@ else
DEBUG_CFLAGS/amd64 = -g
DEBUG_CFLAGS/arm = -g
DEBUG_CFLAGS/ppc = -g
DEBUG_CFLAGS/ppc64 = -g
DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
@ -361,6 +363,7 @@ else
FASTDEBUG_CFLAGS/amd64 = -g
FASTDEBUG_CFLAGS/arm = -g
FASTDEBUG_CFLAGS/ppc = -g
FASTDEBUG_CFLAGS/ppc64 = -g
FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)
@ -375,6 +378,7 @@ else
OPT_CFLAGS/amd64 = -g
OPT_CFLAGS/arm = -g
OPT_CFLAGS/ppc = -g
OPT_CFLAGS/ppc64 = -g
OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
ifeq ($(USE_CLANG), true)

View File

@ -0,0 +1,39 @@
#
# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright 2012, 2013 SAP AG. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Make C code aware that it is on a 64-bit platform.
CFLAGS += -D_LP64=1
# fixes `relocation truncated to fit' error for gcc 4.1.
CFLAGS += -mminimal-toc
# Use ppc64 instructions, but schedule for power5
CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
# let linker find external 64 bit libs.
LFLAGS_VM += -L/lib64
# specify lib format.
LFLAGS_VM += -Wl,-melf64ppc

View File

@ -2,11 +2,11 @@ os_family = linux
arch = ppc
arch_model = ppc
arch_model = ppc_32
os_arch = linux_ppc
os_arch_model = linux_ppc
os_arch_model = linux_ppc_32
lib_arch = ppc
@ -14,4 +14,4 @@ compiler = gcc
gnu_dis_arch = ppc
sysdefs = -DLINUX -D_GNU_SOURCE -DPPC
sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32

View File

@ -0,0 +1,17 @@
os_family = linux
arch = ppc
arch_model = ppc_64
os_arch = linux_ppc
os_arch_model = linux_ppc_64
lib_arch = ppc64
compiler = gcc
gnu_dis_arch = ppc64
sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64

View File

@ -118,7 +118,7 @@ endif
# Compiler warnings are treated as errors
WARNINGS_ARE_ERRORS = -Werror
# Enable these warnings. See 'info gcc' about details on these options
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
# Special cases
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))

View File

@ -0,0 +1,699 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000
}
void Assembler::print_instruction(int inst) {
Unimplemented();
}
// Patch instruction `inst' at offset `inst_pos' to refer to
// `dest_pos' and return the resulting instruction. We should have
// pcs, not offsets, but since all is relative, it will work out fine.
int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
int m = 0; // mask for displacement field
int v = 0; // new value for displacement field
switch (inv_op_ppc(inst)) {
case b_op: m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
default: ShouldNotReachHere();
}
return (inst & ~m) | v;
}
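// Editorial sketch, not part of this commit: the mask/merge step above,
// specialized to the 26-bit LI displacement field of an I-form branch.
// The 0x03fffffc mask and field layout are assumptions taken from the
// PowerPC ISA, not from the li()/bd() helpers used in this file.
#include <cassert>
#include <cstdint>
static uint32_t patch_bxx_sketch(uint32_t inst, int dest_pos, int inst_pos) {
  const uint32_t li_mask = 0x03fffffcu;  // LI field; AA and LK bits stay intact
  int32_t disp = dest_pos - inst_pos;    // byte displacement between offsets
  assert((disp & 3) == 0);               // branch targets are word aligned
  return (inst & ~li_mask) | ((uint32_t)disp & li_mask);
}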
// Return the offset, relative to _code_begin, of the destination of
// the branch inst at offset pos.
int Assembler::branch_destination(int inst, int pos) {
int r = 0;
switch (inv_op_ppc(inst)) {
case b_op: r = bxx_destination_offset(inst, pos); break;
case bc_op: r = inv_bd_field(inst, pos); break;
default: ShouldNotReachHere();
}
return r;
}
// Low-level andi-one-instruction-macro.
void Assembler::andi(Register a, Register s, const int ui16) {
assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
if (is_power_of_2_long(((jlong) ui16)+1)) {
// pow2minus1
clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
} else if (is_power_of_2_long((jlong) ui16)) {
// pow2
rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
} else if (is_power_of_2_long((jlong)-ui16)) {
// negpow2
clrrdi(a, s, log2_long((jlong)-ui16));
} else {
andi_(a, s, ui16);
}
}
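// Editorial sketch, not part of this commit: which single-instruction form
// andi() above selects for a few immediate shapes. is_power_of_2_long,
// clrldi and rlwinm are HotSpot names; plain C++ stand-ins are used here.
#include <cstdio>
static bool pow2_sketch(long long v) { return v > 0 && (v & (v - 1)) == 0; }
static const char* andi_form_sketch(int ui16) {
  if (pow2_sketch((long long)ui16 + 1)) return "clrldi (mask = 2^n - 1)";
  if (pow2_sketch((long long)ui16))     return "rlwinm (isolate one bit)";
  return "andi_ (generic, also sets CR0)";
}
int main() {
  printf("0x00ff -> %s\n", andi_form_sketch(0x00ff));  // clrldi
  printf("0x0400 -> %s\n", andi_form_sketch(0x0400));  // rlwinm
  printf("0x1234 -> %s\n", andi_form_sketch(0x1234));  // andi_
  return 0;
}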
// RegisterOrConstant version.
void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::ld(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::ld(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::ldx(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::ld(d, 0, roc.as_register());
else
Assembler::ldx(d, roc.as_register(), s1);
}
}
void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::lwa(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::lwa(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::lwax(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::lwa(d, 0, roc.as_register());
else
Assembler::lwax(d, roc.as_register(), s1);
}
}
void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::lwz(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::lwz(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::lwzx(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::lwz(d, 0, roc.as_register());
else
Assembler::lwzx(d, roc.as_register(), s1);
}
}
void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::lha(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::lha(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::lhax(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::lha(d, 0, roc.as_register());
else
Assembler::lhax(d, roc.as_register(), s1);
}
}
void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::lhz(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::lhz(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::lhzx(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::lhz(d, 0, roc.as_register());
else
Assembler::lhzx(d, roc.as_register(), s1);
}
}
void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
if (s1 == noreg) {
int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
Assembler::lbz(d, simm16_rest, d);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::lbz(d, roc.as_constant(), s1);
} else {
load_const_optimized(d, roc.as_constant());
Assembler::lbzx(d, d, s1);
}
} else {
if (s1 == noreg)
Assembler::lbz(d, 0, roc.as_register());
else
Assembler::lbzx(d, roc.as_register(), s1);
}
}
void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
if (roc.is_constant()) {
if (s1 == noreg) {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
Assembler::std(d, simm16_rest, tmp);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::std(d, roc.as_constant(), s1);
} else {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
load_const_optimized(tmp, roc.as_constant());
Assembler::stdx(d, tmp, s1);
}
} else {
if (s1 == noreg)
Assembler::std(d, 0, roc.as_register());
else
Assembler::stdx(d, roc.as_register(), s1);
}
}
void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
if (roc.is_constant()) {
if (s1 == noreg) {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
Assembler::stw(d, simm16_rest, tmp);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::stw(d, roc.as_constant(), s1);
} else {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
load_const_optimized(tmp, roc.as_constant());
Assembler::stwx(d, tmp, s1);
}
} else {
if (s1 == noreg)
Assembler::stw(d, 0, roc.as_register());
else
Assembler::stwx(d, roc.as_register(), s1);
}
}
void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
if (roc.is_constant()) {
if (s1 == noreg) {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
Assembler::sth(d, simm16_rest, tmp);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::sth(d, roc.as_constant(), s1);
} else {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
load_const_optimized(tmp, roc.as_constant());
Assembler::sthx(d, tmp, s1);
}
} else {
if (s1 == noreg)
Assembler::sth(d, 0, roc.as_register());
else
Assembler::sthx(d, roc.as_register(), s1);
}
}
void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
if (roc.is_constant()) {
if (s1 == noreg) {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
Assembler::stb(d, simm16_rest, tmp);
} else if (is_simm(roc.as_constant(), 16)) {
Assembler::stb(d, roc.as_constant(), s1);
} else {
guarantee(tmp != noreg, "Need tmp reg to encode large constants");
load_const_optimized(tmp, roc.as_constant());
Assembler::stbx(d, tmp, s1);
}
} else {
if (s1 == noreg)
Assembler::stb(d, 0, roc.as_register());
else
Assembler::stbx(d, roc.as_register(), s1);
}
}
void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
intptr_t c = roc.as_constant();
assert(is_simm(c, 16), "too big");
addi(d, s1, (int)c);
}
else add(d, roc.as_register(), s1);
}
void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
intptr_t c = roc.as_constant();
assert(is_simm(-c, 16), "too big");
addi(d, s1, (int)-c);
}
else subf(d, roc.as_register(), s1);
}
void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
if (roc.is_constant()) {
intptr_t c = roc.as_constant();
assert(is_simm(c, 16), "too big");
cmpdi(d, s1, (int)c);
}
else cmpd(d, roc.as_register(), s1);
}
// Load a 64 bit constant. Patchable.
void Assembler::load_const(Register d, long x, Register tmp) {
// 64-bit value: x = xa xb xc xd
int xa = (x >> 48) & 0xffff;
int xb = (x >> 32) & 0xffff;
int xc = (x >> 16) & 0xffff;
int xd = (x >> 0) & 0xffff;
if (tmp == noreg) {
Assembler::lis( d, (int)(short)xa);
Assembler::ori( d, d, (unsigned int)xb);
Assembler::sldi(d, d, 32);
Assembler::oris(d, d, (unsigned int)xc);
Assembler::ori( d, d, (unsigned int)xd);
} else {
// exploit instruction level parallelism if we have a tmp register
assert_different_registers(d, tmp);
Assembler::lis(tmp, (int)(short)xa);
Assembler::lis(d, (int)(short)xc);
Assembler::ori(tmp, tmp, (unsigned int)xb);
Assembler::ori(d, d, (unsigned int)xd);
Assembler::insrdi(d, tmp, 32, 0);
}
}
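// Editorial sketch, not part of this commit: the four 16-bit chunks that
// load_const() above splits a sample constant into, and the instruction
// each chunk feeds. The sample value is illustrative only.
#include <cstdint>
#include <cstdio>
int main() {
  uint64_t x = 0x1122334455667788ull;
  unsigned xa = (unsigned)((x >> 48) & 0xffff);  // 0x1122 -> lis  d, xa
  unsigned xb = (unsigned)((x >> 32) & 0xffff);  // 0x3344 -> ori  d, d, xb
  unsigned xc = (unsigned)((x >> 16) & 0xffff);  // 0x5566 -> oris d, d, xc (after sldi d, d, 32)
  unsigned xd = (unsigned)((x >>  0) & 0xffff);  // 0x7788 -> ori  d, d, xd
  printf("%04x %04x %04x %04x\n", xa, xb, xc, xd);
  return 0;
}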
// Load a 64 bit constant, optimized, not identifiable.
// Tmp can be used to increase ILP. Set return_simm16_rest=true to get a
// 16 bit immediate offset.
int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
// Avoid accidentally trying to use R0 for indexed addressing.
assert(d != R0, "R0 not allowed");
assert_different_registers(d, tmp);
short xa, xb, xc, xd; // Four 16-bit chunks of const.
long rem = x; // Remaining part of const.
xd = rem & 0xFFFF; // Lowest 16-bit chunk.
rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.
if (rem == 0) { // opt 1: simm16
li(d, xd);
return 0;
}
xc = rem & 0xFFFF; // Next 16-bit chunk.
rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.
if (rem == 0) { // opt 2: simm32
lis(d, xc);
} else { // High 32 bits needed.
if (tmp != noreg) { // opt 3: We have a temp reg.
// No carry propagation between xc and higher chunks here (use logical instructions).
xa = (x >> 48) & 0xffff;
xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
bool load_xa = (xa != 0) || (xb < 0);
bool return_xd = false;
if (load_xa) lis(tmp, xa);
if (xc) lis(d, xc);
if (load_xa) {
if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
} else {
li(tmp, xb); // non-negative
}
if (xc) {
if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
else if (xd) { addi(d, d, xd); }
} else {
li(d, xd);
}
insrdi(d, tmp, 32, 0);
return return_xd ? xd : 0; // non-negative
}
xb = rem & 0xFFFF; // Next 16-bit chunk.
rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.
xa = rem & 0xFFFF; // Highest 16-bit chunk.
// opt 4: avoid adding 0
if (xa) { // Highest 16-bit needed?
lis(d, xa);
if (xb) addi(d, d, xb);
} else {
li(d, xb);
}
sldi(d, d, 32);
if (xc) addis(d, d, xc);
}
// opt 5: Return offset to be inserted into following instruction.
if (return_simm16_rest) return xd;
if (xd) addi(d, d, xd);
return 0;
}
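// Editorial sketch, not part of this commit: why the
// `(rem >> 16) + ((unsigned short)chunk >> 15)` compensation above is
// needed. Each chunk is later consumed sign-extended (li/lis/addi/addis),
// so a chunk with bit 15 set borrows 1 from the next-higher chunk. Like
// the code above, this assumes >> on a negative value is an arithmetic shift.
#include <cassert>
#include <cstdint>
int main() {
  int64_t x = -0x123456789abcLL;  // arbitrary test value
  int64_t rem = x;
  int16_t xd = (int16_t)(rem & 0xFFFF);
  rem = (rem >> 16) + ((uint16_t)xd >> 15);
  int16_t xc = (int16_t)(rem & 0xFFFF);
  rem = (rem >> 16) + ((uint16_t)xc >> 15);
  int16_t xb = (int16_t)(rem & 0xFFFF);
  rem = (rem >> 16) + ((uint16_t)xb >> 15);
  int16_t xa = (int16_t)(rem & 0xFFFF);
  // Reassemble the way lis/addi/sldi/addis/addi would, using unsigned
  // arithmetic so the wraparound stays well defined.
  uint64_t y = ((uint64_t)(int64_t)xa << 16) + (uint64_t)(int64_t)xb;
  y = (y << 32) + ((uint64_t)(int64_t)xc << 16) + (uint64_t)(int64_t)xd;
  assert((int64_t)y == x);
  return 0;
}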
#ifndef PRODUCT
// Test of ppc assembler.
void Assembler::test_asm() {
// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
addi( R0, R1, 10);
addis( R5, R2, 11);
addic_( R3, R31, 42);
subfic( R21, R12, 2112);
add( R3, R2, R1);
add_( R11, R22, R30);
subf( R7, R6, R5);
subf_( R8, R9, R4);
addc( R11, R12, R13);
addc_( R14, R14, R14);
subfc( R15, R16, R17);
subfc_( R18, R20, R19);
adde( R20, R22, R24);
adde_( R29, R27, R26);
subfe( R28, R1, R0);
subfe_( R21, R11, R29);
neg( R21, R22);
neg_( R13, R23);
mulli( R0, R11, -31);
mulld( R1, R18, R21);
mulld_( R2, R17, R22);
mullw( R3, R16, R23);
mullw_( R4, R15, R24);
divd( R5, R14, R25);
divd_( R6, R13, R26);
divw( R7, R12, R27);
divw_( R8, R11, R28);
li( R3, -4711);
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
cmpi( CCR7, 0, R27, 4711);
cmp( CCR0, 1, R14, R11);
cmpli( CCR5, 1, R17, 45);
cmpl( CCR3, 0, R9, R10);
cmpwi( CCR7, R27, 4711);
cmpw( CCR0, R14, R11);
cmplwi( CCR5, R17, 45);
cmplw( CCR3, R9, R10);
cmpdi( CCR7, R27, 4711);
cmpd( CCR0, R14, R11);
cmpldi( CCR5, R17, 45);
cmpld( CCR3, R9, R10);
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
andi_( R4, R5, 0xff);
andis_( R12, R13, 0x7b51);
ori( R1, R4, 13);
oris( R3, R5, 177);
xori( R7, R6, 51);
xoris( R29, R0, 1);
andr( R17, R21, R16);
and_( R3, R5, R15);
orr( R2, R1, R9);
or_( R17, R15, R11);
xorr( R19, R18, R10);
xor_( R31, R21, R11);
nand( R5, R7, R3);
nand_( R3, R1, R0);
nor( R2, R3, R5);
nor_( R3, R6, R8);
andc( R25, R12, R11);
andc_( R24, R22, R21);
orc( R20, R10, R12);
orc_( R22, R2, R13);
nop();
// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
sld( R5, R6, R8);
sld_( R3, R5, R9);
slw( R2, R1, R10);
slw_( R6, R26, R16);
srd( R16, R24, R8);
srd_( R21, R14, R7);
srw( R22, R25, R29);
srw_( R5, R18, R17);
srad( R7, R11, R0);
srad_( R9, R13, R1);
sraw( R7, R15, R2);
sraw_( R4, R17, R3);
sldi( R3, R18, 63);
sldi_( R2, R20, 30);
slwi( R1, R21, 30);
slwi_( R7, R23, 8);
srdi( R0, R19, 2);
srdi_( R12, R24, 5);
srwi( R13, R27, 6);
srwi_( R14, R29, 7);
sradi( R15, R30, 9);
sradi_( R16, R31, 19);
srawi( R17, R31, 15);
srawi_( R18, R31, 12);
clrrdi( R3, R30, 5);
clrldi( R9, R10, 11);
rldicr( R19, R20, 13, 15);
rldicr_(R20, R20, 16, 14);
rldicl( R21, R21, 30, 33);
rldicl_(R22, R1, 20, 25);
rlwinm( R23, R2, 25, 10, 11);
rlwinm_(R24, R3, 12, 13, 14);
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
lwzx( R3, R5, R7);
lwz( R11, 0, R1);
lwzu( R31, -4, R11);
lwax( R3, R5, R7);
lwa( R31, -4, R11);
lhzx( R3, R5, R7);
lhz( R31, -4, R11);
lhzu( R31, -4, R11);
lhax( R3, R5, R7);
lha( R31, -4, R11);
lhau( R11, 0, R1);
lbzx( R3, R5, R7);
lbz( R31, -4, R11);
lbzu( R11, 0, R1);
ld( R31, -4, R11);
ldx( R3, R5, R7);
ldu( R31, -4, R11);
// PPC 1, section 3.3.3 Fixed-Point Store Instructions
stwx( R3, R5, R7);
stw( R31, -4, R11);
stwu( R11, 0, R1);
sthx( R3, R5, R7 );
sth( R31, -4, R11);
sthu( R31, -4, R11);
stbx( R3, R5, R7);
stb( R31, -4, R11);
stbu( R31, -4, R11);
std( R31, -4, R11);
stdx( R3, R5, R7);
stdu( R31, -4, R11);
// PPC 1, section 3.3.13 Move To/From System Register Instructions
mtlr( R3);
mflr( R3);
mtctr( R3);
mfctr( R3);
mtcrf( 0xff, R15);
mtcr( R15);
mtcrf( 0x03, R15);
mtcr( R15);
mfcr( R15);
// PPC 1, section 2.4.1 Branch Instructions
Label lbl1, lbl2, lbl3;
bind(lbl1);
b(pc());
b(pc() - 8);
b(lbl1);
b(lbl2);
b(lbl3);
bl(pc() - 8);
bl(lbl1);
bl(lbl2);
bcl(4, 10, pc() - 8);
bcl(4, 10, lbl1);
bcl(4, 10, lbl2);
bclr( 4, 6, 0);
bclrl(4, 6, 0);
bind(lbl2);
bcctr( 4, 6, 0);
bcctrl(4, 6, 0);
blt(CCR0, lbl2);
bgt(CCR1, lbl2);
beq(CCR2, lbl2);
bso(CCR3, lbl2);
bge(CCR4, lbl2);
ble(CCR5, lbl2);
bne(CCR6, lbl2);
bns(CCR7, lbl2);
bltl(CCR0, lbl2);
bgtl(CCR1, lbl2);
beql(CCR2, lbl2);
bsol(CCR3, lbl2);
bgel(CCR4, lbl2);
blel(CCR5, lbl2);
bnel(CCR6, lbl2);
bnsl(CCR7, lbl2);
blr();
sync();
icbi( R1, R2);
dcbst(R2, R3);
// FLOATING POINT instructions ppc.
// PPC 1, section 4.6.2 Floating-Point Load Instructions
lfs( F1, -11, R3);
lfsu(F2, 123, R4);
lfsx(F3, R5, R6);
lfd( F4, 456, R7);
lfdu(F5, 789, R8);
lfdx(F6, R10, R11);
// PPC 1, section 4.6.3 Floating-Point Store Instructions
stfs( F7, 876, R12);
stfsu( F8, 543, R13);
stfsx( F9, R14, R15);
stfd( F10, 210, R16);
stfdu( F11, 111, R17);
stfdx( F12, R18, R19);
// PPC 1, section 4.6.4 Floating-Point Move Instructions
fmr( F13, F14);
fmr_( F14, F15);
fneg( F16, F17);
fneg_( F18, F19);
fabs( F20, F21);
fabs_( F22, F23);
fnabs( F24, F25);
fnabs_(F26, F27);
// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
// Instructions
fadd( F28, F29, F30);
fadd_( F31, F0, F1);
fadds( F2, F3, F4);
fadds_(F5, F6, F7);
fsub( F8, F9, F10);
fsub_( F11, F12, F13);
fsubs( F14, F15, F16);
fsubs_(F17, F18, F19);
fmul( F20, F21, F22);
fmul_( F23, F24, F25);
fmuls( F26, F27, F28);
fmuls_(F29, F30, F31);
fdiv( F0, F1, F2);
fdiv_( F3, F4, F5);
fdivs( F6, F7, F8);
fdivs_(F9, F10, F11);
// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
// Instructions
frsp( F12, F13);
fctid( F14, F15);
fctidz(F16, F17);
fctiw( F18, F19);
fctiwz(F20, F21);
fcfid( F22, F23);
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
fcmpu( CCR7, F24, F25);
tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
code()->decode();
}
#endif // !PRODUCT

File diff suppressed because it is too large

View File

@ -0,0 +1,813 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
#define CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
inline void Assembler::emit_int32(int x) {
AbstractAssembler::emit_int32(x);
}
inline void Assembler::emit_data(int x) {
emit_int32(x);
}
inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
relocate(rtype);
emit_int32(x);
}
inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
relocate(rspec);
emit_int32(x);
}
// Emit an address
inline address Assembler::emit_addr(const address addr) {
address start = pc();
emit_address(addr);
return start;
}
// Emit a function descriptor with the specified entry point, TOC, and
// ENV. If the entry point is NULL, the descriptor will point just
// past the descriptor.
inline address Assembler::emit_fd(address entry, address toc, address env) {
FunctionDescriptor* fd = (FunctionDescriptor*)pc();
assert(sizeof(FunctionDescriptor) == 3*sizeof(address), "function descriptor size");
(void)emit_addr();
(void)emit_addr();
(void)emit_addr();
fd->set_entry(entry == NULL ? pc() : entry);
fd->set_toc(toc);
fd->set_env(env);
return (address)fd;
}
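// Editorial sketch, an assumption rather than code from this commit: the
// three-slot layout emit_fd() reserves above. The real FunctionDescriptor
// class is defined elsewhere in the port; this only illustrates the
// expected shape of a 64-bit function descriptor (entry, TOC, environment).
struct FunctionDescriptorSketch {
  void* entry;  // address of the function's first instruction
  void* toc;    // TOC base (r2) the callee expects on entry
  void* env;    // environment pointer; unused by C/C++ code
};
static_assert(sizeof(FunctionDescriptorSketch) == 3 * sizeof(void*),
              "three address-sized slots, matching emit_fd()");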
// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
inline void Assembler::illtrap() { Assembler::emit_int32(0); }
inline bool Assembler::is_illtrap(int x) { return x == 0; }
// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
inline void Assembler::addi( Register d, Register a, int si16) { assert(a != R0, "r0 not allowed"); addi_r0ok( d, a, si16); }
inline void Assembler::addis( Register d, Register a, int si16) { assert(a != R0, "r0 not allowed"); addis_r0ok(d, a, si16); }
inline void Assembler::addi_r0ok(Register d,Register a,int si16) { emit_int32(ADDI_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
inline void Assembler::addis_r0ok(Register d,Register a,int si16) { emit_int32(ADDIS_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
inline void Assembler::addic_( Register d, Register a, int si16) { emit_int32(ADDIC__OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
inline void Assembler::subfic( Register d, Register a, int si16) { emit_int32(SUBFIC_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
inline void Assembler::add( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::add_( Register d, Register a, Register b) { emit_int32(ADD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::subf( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::sub( Register d, Register a, Register b) { subf(d, b, a); }
inline void Assembler::subf_( Register d, Register a, Register b) { emit_int32(SUBF_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::addc( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::addc_( Register d, Register a, Register b) { emit_int32(ADDC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::subfc( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::subfc_( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::adde( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::adde_( Register d, Register a, Register b) { emit_int32(ADDE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::subfe( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::subfe_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::neg( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(0)); }
inline void Assembler::neg_( Register d, Register a) { emit_int32(NEG_OPCODE | rt(d) | ra(a) | oe(0) | rc(1)); }
inline void Assembler::mulli( Register d, Register a, int si16) { emit_int32(MULLI_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
inline void Assembler::mulld( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::mulld_( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::mullw( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::mulhw( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::mulhd( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
inline void Assembler::mulhdu_(Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::divd( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::divd_( Register d, Register a, Register b) { emit_int32(DIVD_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
inline void Assembler::divw( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
inline void Assembler::divw_( Register d, Register a, Register b) { emit_int32(DIVW_OPCODE | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
// extended mnemonics
inline void Assembler::li( Register d, int si16) { Assembler::addi_r0ok( d, R0, si16); }
inline void Assembler::lis( Register d, int si16) { Assembler::addis_r0ok(d, R0, si16); }
inline void Assembler::addir(Register d, int si16, Register a) { Assembler::addi(d, a, si16); }
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
inline void Assembler::cmpi( ConditionRegister f, int l, Register a, int si16) { emit_int32( CMPI_OPCODE | bf(f) | l10(l) | ra(a) | simm(si16,16)); }
inline void Assembler::cmp( ConditionRegister f, int l, Register a, Register b) { emit_int32( CMP_OPCODE | bf(f) | l10(l) | ra(a) | rb(b)); }
inline void Assembler::cmpli( ConditionRegister f, int l, Register a, int ui16) { emit_int32( CMPLI_OPCODE | bf(f) | l10(l) | ra(a) | uimm(ui16,16)); }
inline void Assembler::cmpl( ConditionRegister f, int l, Register a, Register b) { emit_int32( CMPL_OPCODE | bf(f) | l10(l) | ra(a) | rb(b)); }
// extended mnemonics of Compare Instructions
inline void Assembler::cmpwi( ConditionRegister crx, Register a, int si16) { Assembler::cmpi( crx, 0, a, si16); }
inline void Assembler::cmpdi( ConditionRegister crx, Register a, int si16) { Assembler::cmpi( crx, 1, a, si16); }
inline void Assembler::cmpw( ConditionRegister crx, Register a, Register b) { Assembler::cmp( crx, 0, a, b); }
inline void Assembler::cmpd( ConditionRegister crx, Register a, Register b) { Assembler::cmp( crx, 1, a, b); }
inline void Assembler::cmplwi(ConditionRegister crx, Register a, int ui16) { Assembler::cmpli(crx, 0, a, ui16); }
inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16) { Assembler::cmpli(crx, 1, a, ui16); }
inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE | rt(d) | ra(a) | rb(b) | bc(c)); }
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
inline void Assembler::andi_( Register a, Register s, int ui16) { emit_int32(ANDI_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::andis_( Register a, Register s, int ui16) { emit_int32(ANDIS_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::ori( Register a, Register s, int ui16) { emit_int32(ORI_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::oris( Register a, Register s, int ui16) { emit_int32(ORIS_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::xori( Register a, Register s, int ui16) { emit_int32(XORI_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::xoris( Register a, Register s, int ui16) { emit_int32(XORIS_OPCODE | rta(a) | rs(s) | uimm(ui16, 16)); }
inline void Assembler::andr( Register a, Register s, Register b) { emit_int32(AND_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::and_( Register a, Register s, Register b) { emit_int32(AND_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::or_unchecked(Register a, Register s, Register b){ emit_int32(OR_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
// "or x,x,x" forms encode SMT priority hints (see smt_prio_* below), so emit a plain nop instead.
inline void Assembler::orr(    Register a, Register s, Register b) { if (a==s && s==b) { Assembler::nop(); } else { Assembler::or_unchecked(a,s,b); } }
inline void Assembler::or_( Register a, Register s, Register b) { emit_int32(OR_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::xorr( Register a, Register s, Register b) { emit_int32(XOR_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::xor_( Register a, Register s, Register b) { emit_int32(XOR_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::nand( Register a, Register s, Register b) { emit_int32(NAND_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::nand_( Register a, Register s, Register b) { emit_int32(NAND_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::nor( Register a, Register s, Register b) { emit_int32(NOR_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::nor_( Register a, Register s, Register b) { emit_int32(NOR_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::andc( Register a, Register s, Register b) { emit_int32(ANDC_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::andc_( Register a, Register s, Register b) { emit_int32(ANDC_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::orc( Register a, Register s, Register b) { emit_int32(ORC_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::orc_( Register a, Register s, Register b) { emit_int32(ORC_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::extsb( Register a, Register s) { emit_int32(EXTSB_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsh( Register a, Register s) { emit_int32(EXTSH_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::extsw( Register a, Register s) { emit_int32(EXTSW_OPCODE | rta(a) | rs(s) | rc(0)); }
// extended mnemonics
inline void Assembler::nop() { Assembler::ori(R0, R0, 0); }
// NOP for FP and BR units (different versions to allow them to be in one group)
inline void Assembler::fpnop0() { Assembler::fmr(F30, F30); }
inline void Assembler::fpnop1() { Assembler::fmr(F31, F31); }
inline void Assembler::brnop0() { Assembler::mcrf(CCR2, CCR2); }
inline void Assembler::brnop1() { Assembler::mcrf(CCR3, CCR3); }
inline void Assembler::brnop2() { Assembler::mcrf(CCR4, CCR4); }
inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s, s); }
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
inline void Assembler::oris_opt(Register d, int ui16) { if (ui16!=0) Assembler::oris(d, d, ui16); }
// 'ori R1,R1,0' is interpreted as a group-ending nop hint by POWER processors.
inline void Assembler::endgroup() { Assembler::ori(R1, R1, 0); }
// count instructions
inline void Assembler::cntlzw( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzw_( Register a, Register s) { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }
inline void Assembler::cntlzd( Register a, Register s) { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(0)); }
inline void Assembler::cntlzd_( Register a, Register s) { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(1)); }
// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
inline void Assembler::sld( Register a, Register s, Register b) { emit_int32(SLD_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::sld_( Register a, Register s, Register b) { emit_int32(SLD_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::slw( Register a, Register s, Register b) { emit_int32(SLW_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::slw_( Register a, Register s, Register b) { emit_int32(SLW_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srd( Register a, Register s, Register b) { emit_int32(SRD_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srd_( Register a, Register s, Register b) { emit_int32(SRD_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srw( Register a, Register s, Register b) { emit_int32(SRW_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srw_( Register a, Register s, Register b) { emit_int32(SRW_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::srad( Register a, Register s, Register b) { emit_int32(SRAD_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::srad_( Register a, Register s, Register b) { emit_int32(SRAD_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::sraw( Register a, Register s, Register b) { emit_int32(SRAW_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::sraw_( Register a, Register s, Register b) { emit_int32(SRAW_OPCODE | rta(a) | rs(s) | rb(b) | rc(1)); }
inline void Assembler::sradi( Register a, Register s, int sh6) { emit_int32(SRADI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | rc(0)); }
inline void Assembler::sradi_( Register a, Register s, int sh6) { emit_int32(SRADI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | rc(1)); }
inline void Assembler::srawi( Register a, Register s, int sh5) { emit_int32(SRAWI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | rc(0)); }
inline void Assembler::srawi_( Register a, Register s, int sh5) { emit_int32(SRAWI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | rc(1)); }
// extended mnemonics for Shift Instructions
inline void Assembler::sldi( Register a, Register s, int sh6) { Assembler::rldicr(a, s, sh6, 63-sh6); }
inline void Assembler::sldi_( Register a, Register s, int sh6) { Assembler::rldicr_(a, s, sh6, 63-sh6); }
inline void Assembler::slwi( Register a, Register s, int sh5) { Assembler::rlwinm(a, s, sh5, 0, 31-sh5); }
inline void Assembler::slwi_( Register a, Register s, int sh5) { Assembler::rlwinm_(a, s, sh5, 0, 31-sh5); }
inline void Assembler::srdi( Register a, Register s, int sh6) { Assembler::rldicl(a, s, 64-sh6, sh6); }
inline void Assembler::srdi_( Register a, Register s, int sh6) { Assembler::rldicl_(a, s, 64-sh6, sh6); }
inline void Assembler::srwi( Register a, Register s, int sh5) { Assembler::rlwinm(a, s, 32-sh5, sh5, 31); }
inline void Assembler::srwi_( Register a, Register s, int sh5) { Assembler::rlwinm_(a, s, 32-sh5, sh5, 31); }
inline void Assembler::clrrdi( Register a, Register s, int ui6) { Assembler::rldicr(a, s, 0, 63-ui6); }
inline void Assembler::clrrdi_( Register a, Register s, int ui6) { Assembler::rldicr_(a, s, 0, 63-ui6); }
inline void Assembler::clrldi( Register a, Register s, int ui6) { Assembler::rldicl(a, s, 0, ui6); }
inline void Assembler::clrldi_( Register a, Register s, int ui6) { Assembler::rldicl_(a, s, 0, ui6); }
inline void Assembler::clrlsldi( Register a, Register s, int clrl6, int shl6) { Assembler::rldic( a, s, shl6, clrl6-shl6); }
inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) { Assembler::rldic_(a, s, shl6, clrl6-shl6); }
inline void Assembler::extrdi( Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
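// Worked example (sketch): srdi(R3, R4, 8) encodes as rldicl(R3, R4, 56, 8),
// i.e. rotate left by 64-8 and clear the 8 leftmost bits -- a logical right shift by 8.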
// testbit with condition register.
inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
  if (cr == CCR0) {
    // Record form sets CR0 directly: eq is set iff the tested bit is zero.
    Assembler::rldicr_(a, s, 63-ui6, 0);
  } else {
    // Other condition registers require an explicit compare.
    Assembler::rldicr(a, s, 63-ui6, 0);
    Assembler::cmpdi(cr, a, 0);
  }
}
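// Usage sketch (register and label names are placeholders): test bit 5, counted
// from the least significant bit, of Rsrc, clobbering scratch register Rtmp:
//   testbitdi(CCR0, Rtmp, Rsrc, 5);
//   btrue(CCR0, L_bit_was_set);  // btrue/bfalse (see below) branch on the result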
// rotate instructions
inline void Assembler::rotldi( Register a, Register s, int n) { Assembler::rldicl(a, s, n, 0); }
inline void Assembler::rotrdi( Register a, Register s, int n) { Assembler::rldicl(a, s, 64-n, 0); }
inline void Assembler::rotlwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, n, 0, 31); }
inline void Assembler::rotrwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, 32-n, 0, 31); }
inline void Assembler::rldic( Register a, Register s, int sh6, int mb6) { emit_int32(RLDIC_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rldic_( Register a, Register s, int sh6, int mb6) { emit_int32(RLDIC_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
inline void Assembler::rldicr( Register a, Register s, int sh6, int mb6) { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rldicr_( Register a, Register s, int sh6, int mb6) { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
inline void Assembler::rldicl( Register a, Register s, int sh6, int me6) { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(0)); }
inline void Assembler::rldicl_( Register a, Register s, int sh6, int me6) { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(1)); }
inline void Assembler::rlwinm( Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
inline void Assembler::rlwinm_( Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(1)); }
inline void Assembler::rldimi( Register a, Register s, int sh6, int mb6) { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
inline void Assembler::rlwimi( Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWIMI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6) { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
inline void Assembler::insrdi( Register a, Register s, int n, int b) { Assembler::rldimi(a, s, 64-(b+n), b); }
inline void Assembler::insrwi( Register a, Register s, int n, int b) { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lwz( Register d, int si16, Register s1) { emit_int32(LWZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lwzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LWZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lwa( Register d, int si16, Register s1) { emit_int32(LWA_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhz( Register d, int si16, Register s1) { emit_int32(LHZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lha( Register d, int si16, Register s1) { emit_int32(LHA_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhau( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lbzx( Register d, Register s1, Register s2) { emit_int32(LBZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lbz( Register d, int si16, Register s1) { emit_int32(LBZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lbzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::ld( Register d, int si16, Register s1) { emit_int32(LD_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::ldx( Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::ldu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
// PPC 1, section 3.3.3 Fixed-Point Store Instructions
inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stw( Register d, int si16, Register s1) { emit_int32(STW_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stwu( Register d, int si16, Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::sth( Register d, int si16, Register s1) { emit_int32(STH_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::sthu( Register d, int si16, Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stb( Register d, int si16, Register s1) { emit_int32(STB_OPCODE | rs(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::stbu( Register d, int si16, Register s1) { emit_int32(STBU_OPCODE | rs(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::std( Register d, int si16, Register s1) { emit_int32(STD_OPCODE | rs(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::stdu( Register d, int si16, Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16) | rta0mem(s1));}
inline void Assembler::stdux(Register s, Register a, Register b) { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
// PPC 1, section 3.3.13 Move To/From System Register Instructions
inline void Assembler::mtlr( Register s1) { emit_int32(MTLR_OPCODE | rs(s1)); }
inline void Assembler::mflr( Register d ) { emit_int32(MFLR_OPCODE | rt(d)); }
inline void Assembler::mtctr(Register s1) { emit_int32(MTCTR_OPCODE | rs(s1)); }
inline void Assembler::mfctr(Register d ) { emit_int32(MFCTR_OPCODE | rt(d)); }
inline void Assembler::mtcrf(int afxm, Register s){ emit_int32(MTCRF_OPCODE | fxm(afxm) | rs(s)); }
inline void Assembler::mfcr( Register d ) { emit_int32(MFCR_OPCODE | rt(d)); }
inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
{ emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
inline void Assembler::mtcr( Register s) { Assembler::mtcrf(0xff, s); }
// PPC 1, section 2.4.1 Branch Instructions
inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
inline void Assembler::b( Label& L) { b( target(L)); }
inline void Assembler::bl(address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(1), rt); }
inline void Assembler::bl(Label& L) { bl(target(L)); }
inline void Assembler::bc( int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0) | lk(0), rt); }
inline void Assembler::bc( int boint, int biint, Label& L) { bc(boint, biint, target(L)); }
inline void Assembler::bcl(int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0)|lk(1), rt); }
inline void Assembler::bcl(int boint, int biint, Label& L) { bcl(boint, biint, target(L)); }
inline void Assembler::bclr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
// helper function for b
inline bool Assembler::is_within_range_of_b(address a, address pc) {
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
int value = disp(intptr_t(a), intptr_t(pc));
bool result = -range <= value && value < range-1;
#ifdef ASSERT
if (result) li(value); // Assert that value is in correct range.
#endif
return result;
}
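// Note: the 24-bit signed word displacement gives b/bl a direct reach of
// 1 << 23 instructions, i.e. roughly +/- 32 MB around pc.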
// helper functions for bcxx.
inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
int value = disp(intptr_t(a), intptr_t(pc));
bool result = -range <= value && value < range-1;
#ifdef ASSERT
if (result) bd(value); // Assert that value is in correct range.
#endif
return result;
}
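// Note: the 14-bit signed word displacement gives bcxx a direct reach of
// 1 << 13 instructions, i.e. roughly +/- 32 KB around pc.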
// Get the destination of a bxx branch (b, bl, ba, bla).
address Assembler::bxx_destination(address baddr) { return bxx_destination(*(int*)baddr, baddr); }
address Assembler::bxx_destination(int instr, address pc) { return (address)bxx_destination_offset(instr, (intptr_t)pc); }
intptr_t Assembler::bxx_destination_offset(int instr, intptr_t bxx_pos) {
intptr_t displ = inv_li_field(instr);
return bxx_pos + displ;
}
// Extended mnemonics for Branch Instructions
inline void Assembler::blt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, less), L); }
inline void Assembler::bgt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, greater), L); }
inline void Assembler::beq(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, equal), L); }
inline void Assembler::bso(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
inline void Assembler::bge(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, less), L); }
inline void Assembler::ble(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, greater), L); }
inline void Assembler::bne(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, equal), L); }
inline void Assembler::bns(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
// Branch instructions with static prediction hints.
inline void Assembler::blt_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken, bi0(crx, less), L); }
inline void Assembler::bgt_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken, bi0(crx, greater), L); }
inline void Assembler::beq_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken, bi0(crx, equal), L); }
inline void Assembler::bso_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken, bi0(crx, summary_overflow), L); }
inline void Assembler::bge_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken, bi0(crx, less), L); }
inline void Assembler::ble_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken, bi0(crx, greater), L); }
inline void Assembler::bne_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken, bi0(crx, equal), L); }
inline void Assembler::bns_predict_taken (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken, bi0(crx, summary_overflow), L); }
inline void Assembler::blt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, less), L); }
inline void Assembler::bgt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, greater), L); }
inline void Assembler::beq_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, equal), L); }
inline void Assembler::bso_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
inline void Assembler::bge_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, less), L); }
inline void Assembler::ble_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, greater), L); }
inline void Assembler::bne_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, equal), L); }
inline void Assembler::bns_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
// For use in conjunction with testbitdi:
inline void Assembler::btrue( ConditionRegister crx, Label& L) { Assembler::bne(crx, L); }
inline void Assembler::bfalse(ConditionRegister crx, Label& L) { Assembler::beq(crx, L); }
inline void Assembler::bltl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, less), L); }
inline void Assembler::bgtl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, greater), L); }
inline void Assembler::beql(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, equal), L); }
inline void Assembler::bsol(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
inline void Assembler::bgel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, less), L); }
inline void Assembler::blel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, greater), L); }
inline void Assembler::bnel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, equal), L); }
inline void Assembler::bnsl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
// Extended mnemonics for Branch Instructions via LR.
// We use `blr' for returns.
inline void Assembler::blr(relocInfo::relocType rt) { Assembler::bclr(bcondAlways, 0, bhintbhBCLRisReturn, rt); }
// Extended mnemonics for Branch Instructions with CTR.
// Bdnz means `decrement CTR and jump to L if CTR is not zero'.
inline void Assembler::bdnz(Label& L) { Assembler::bc(16, 0, L); }
// Decrement and branch if result is zero.
inline void Assembler::bdz(Label& L) { Assembler::bc(18, 0, L); }
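// Counted-loop sketch (label and register choices are placeholders):
//   li(R3, 10);      // iteration count
//   mtctr(R3);       // CTR = 10
//   bind(L_loop);
//   ...              // loop body
//   bdnz(L_loop);    // decrement CTR, branch back while CTR != 0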
// We use `bctr[l]' for jumps/calls in function descriptor glue
// code, e.g. for calls to runtime functions.
inline void Assembler::bctr( relocInfo::relocType rt) { Assembler::bcctr(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bctrl(relocInfo::relocType rt) { Assembler::bcctrl(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
// Conditional jumps/branches via CTR.
inline void Assembler::beqctr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::beqctrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bnectr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
inline void Assembler::bnectrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
// condition register logic instructions
inline void Assembler::crand( int d, int s1, int s2) { emit_int32(CRAND_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crnand(int d, int s1, int s2) { emit_int32(CRNAND_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::cror( int d, int s1, int s2) { emit_int32(CROR_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crxor( int d, int s1, int s2) { emit_int32(CRXOR_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crnor( int d, int s1, int s2) { emit_int32(CRNOR_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::creqv( int d, int s1, int s2) { emit_int32(CREQV_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crandc(int d, int s1, int s2) { emit_int32(CRANDC_OPCODE | bt(d) | ba(s1) | bb(s2)); }
inline void Assembler::crorc( int d, int s1, int s2) { emit_int32(CRORC_OPCODE | bt(d) | ba(s1) | bb(s2)); }
// Conditional move (>= Power7)
inline void Assembler::isel(Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b) {
if (b == noreg) {
b = d; // Can be omitted if old value should be kept in "else" case.
}
Register first = a;
Register second = b;
if (inv) {
first = b;
second = a; // exchange
}
assert(first != R0, "r0 not allowed");
isel(d, first, second, bi0(cr, cc));
}
inline void Assembler::isel_0(Register d, ConditionRegister cr, Condition cc, Register b) {
if (b == noreg) {
b = d; // Can be omitted if old value should be kept in "else" case.
}
isel(d, R0, b, bi0(cr, cc));
}
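// Usage sketch: a conditional move d = crx.equal ? a : b
//   isel(d, crx, equal, false, a, b);
// With inv == true the operands are exchanged, giving d = crx.equal ? b : a.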
// PPC 2, section 3.2.1 Instruction Cache Instructions
inline void Assembler::icbi( Register s1, Register s2) { emit_int32( ICBI_OPCODE | ra0mem(s1) | rb(s2) ); }
// PPC 2, section 3.2.2 Data Cache Instructions
//inline void Assembler::dcba( Register s1, Register s2) { emit_int32( DCBA_OPCODE | ra0mem(s1) | rb(s2) ); }
inline void Assembler::dcbz( Register s1, Register s2) { emit_int32( DCBZ_OPCODE | ra0mem(s1) | rb(s2) ); }
inline void Assembler::dcbst( Register s1, Register s2) { emit_int32( DCBST_OPCODE | ra0mem(s1) | rb(s2) ); }
inline void Assembler::dcbf( Register s1, Register s2) { emit_int32( DCBF_OPCODE | ra0mem(s1) | rb(s2) ); }
// dcache read hint
inline void Assembler::dcbt( Register s1, Register s2) { emit_int32( DCBT_OPCODE | ra0mem(s1) | rb(s2) ); }
inline void Assembler::dcbtct( Register s1, Register s2, int ct) { emit_int32( DCBT_OPCODE | ra0mem(s1) | rb(s2) | thct(ct)); }
inline void Assembler::dcbtds( Register s1, Register s2, int ds) { emit_int32( DCBT_OPCODE | ra0mem(s1) | rb(s2) | thds(ds)); }
// dcache write hint
inline void Assembler::dcbtst( Register s1, Register s2) { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2) ); }
inline void Assembler::dcbtstct(Register s1, Register s2, int ct) { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2) | thct(ct)); }
// machine barrier instructions:
inline void Assembler::sync(int a) { emit_int32( SYNC_OPCODE | l910(a)); }
inline void Assembler::sync() { Assembler::sync(0); }
inline void Assembler::lwsync() { Assembler::sync(1); }
inline void Assembler::ptesync() { Assembler::sync(2); }
inline void Assembler::eieio() { emit_int32( EIEIO_OPCODE); }
inline void Assembler::isync() { emit_int32( ISYNC_OPCODE); }
inline void Assembler::elemental_membar(int e) { assert(0 < e && e < 16, "invalid encoding"); emit_int32( SYNC_OPCODE | e1215(e)); }
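// The L field selects the barrier strength: sync(0) is the full heavyweight sync,
// sync(1) the cheaper lwsync (orders everything except store-load), and sync(2)
// ptesync for page table updates. eieio mainly orders device storage accesses;
// isync is context synchronizing and discards prefetched instructions.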
// atomics
// Use ra0mem to disallow R0 as base.
inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
inline bool Assembler::lxarx_hint_exclusive_access() { return VM_Version::has_lxarxeh(); }
inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stwcx_(Register s, Register a, Register b) { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register a, Register b) { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
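// Load-reserve/store-conditional sketch of a 32-bit compare-and-swap; register
// and label names are placeholders, memory barriers are omitted:
//   bind(L_retry);
//   lwarx(Rcur, Rbase, Roffs);   // load word and acquire a reservation
//   cmpw(CCR0, Rcur, Rcmp);      // matches the expected value?
//   bne(CCR0, L_fail);
//   stwcx_(Rnew, Rbase, Roffs);  // store iff the reservation still holds (sets CR0)
//   bne(CCR0, L_retry);          // reservation lost -> retry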
// Instructions for adjusting thread priority
// for simultaneous multithreading (SMT) on POWER5.
inline void Assembler::smt_prio_very_low() { Assembler::or_unchecked(R31, R31, R31); }
inline void Assembler::smt_prio_low() { Assembler::or_unchecked(R1, R1, R1); }
inline void Assembler::smt_prio_medium_low() { Assembler::or_unchecked(R6, R6, R6); }
inline void Assembler::smt_prio_medium() { Assembler::or_unchecked(R2, R2, R2); }
inline void Assembler::smt_prio_medium_high() { Assembler::or_unchecked(R5, R5, R5); }
inline void Assembler::smt_prio_high() { Assembler::or_unchecked(R3, R3, R3); }
// trap instructions
inline void Assembler::tdi_unchecked(int tobits, Register a, int si16){ emit_int32( TDI_OPCODE | to(tobits) | ra(a) | si(si16)); }
inline void Assembler::twi_unchecked(int tobits, Register a, int si16){ emit_int32( TWI_OPCODE | to(tobits) | ra(a) | si(si16)); }
inline void Assembler::twi_0(Register a) { twi_unchecked(0, a, 0); }
inline void Assembler::tdi(int tobits, Register a, int si16) { assert(UseSIGTRAP, "precondition"); tdi_unchecked(tobits, a, si16); }
inline void Assembler::twi(int tobits, Register a, int si16) { assert(UseSIGTRAP, "precondition"); twi_unchecked(tobits, a, si16); }
inline void Assembler::td( int tobits, Register a, Register b) { assert(UseSIGTRAP, "precondition"); emit_int32( TD_OPCODE | to(tobits) | ra(a) | rb(b)); }
inline void Assembler::tw( int tobits, Register a, Register b) { assert(UseSIGTRAP, "precondition"); emit_int32( TW_OPCODE | to(tobits) | ra(a) | rb(b)); }
// FLOATING POINT instructions (PPC).
// PPC 1, section 4.6.2 Floating-Point Load Instructions
// Use ra0mem instead of ra in some instructions below.
inline void Assembler::lfs( FloatRegister d, int si16, Register a) { emit_int32( LFS_OPCODE | frt(d) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::lfsu(FloatRegister d, int si16, Register a) { emit_int32( LFSU_OPCODE | frt(d) | ra(a) | simm(si16,16)); }
inline void Assembler::lfsx(FloatRegister d, Register a, Register b) { emit_int32( LFSX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
inline void Assembler::lfd( FloatRegister d, int si16, Register a) { emit_int32( LFD_OPCODE | frt(d) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::lfdu(FloatRegister d, int si16, Register a) { emit_int32( LFDU_OPCODE | frt(d) | ra(a) | simm(si16,16)); }
inline void Assembler::lfdx(FloatRegister d, Register a, Register b) { emit_int32( LFDX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
// PPC 1, section 4.6.3 Floating-Point Store Instructions
// Use ra0mem instead of ra in some instructions below.
inline void Assembler::stfs( FloatRegister s, int si16, Register a) { emit_int32( STFS_OPCODE | frs(s) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::stfsu(FloatRegister s, int si16, Register a) { emit_int32( STFSU_OPCODE | frs(s) | ra(a) | simm(si16,16)); }
inline void Assembler::stfsx(FloatRegister s, Register a, Register b){ emit_int32( STFSX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
inline void Assembler::stfd( FloatRegister s, int si16, Register a) { emit_int32( STFD_OPCODE | frs(s) | ra0mem(a) | simm(si16,16)); }
inline void Assembler::stfdu(FloatRegister s, int si16, Register a) { emit_int32( STFDU_OPCODE | frs(s) | ra(a) | simm(si16,16)); }
inline void Assembler::stfdx(FloatRegister s, Register a, Register b){ emit_int32( STFDX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
// PPC 1, section 4.6.4 Floating-Point Move Instructions
inline void Assembler::fmr( FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(1)); }
// These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
// on Power7. Do not use.
//inline void Assembler::mffgpr( FloatRegister d, Register b) { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
//inline void Assembler::mftgpr( Register d, FloatRegister b) { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
// cmpb and popcntb are used to detect the POWER processor version.
inline void Assembler::cmpb( Register a, Register s, Register b) { emit_int32( CMPB_OPCODE | rta(a) | rs(s) | rb(b) | rc(0)); }
inline void Assembler::popcntb(Register a, Register s) { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); }
inline void Assembler::popcntw(Register a, Register s) { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); }
inline void Assembler::popcntd(Register a, Register s) { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); }
inline void Assembler::fneg( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fneg_( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE | frt(d) | frb(b) | rc(1)); }
inline void Assembler::fabs( FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fabs_( FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE | frt(d) | frb(b) | rc(1)); }
inline void Assembler::fnabs( FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fnabs_(FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(1)); }
// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
inline void Assembler::fadd( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fadd_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fadds( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fadds_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fsub( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fsub_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fsubs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fsubs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fmul( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE | frt(d) | fra(a) | frc(c) | rc(0)); }
inline void Assembler::fmul_( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE | frt(d) | fra(a) | frc(c) | rc(1)); }
inline void Assembler::fmuls( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(0)); }
inline void Assembler::fmuls_(FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(1)); }
inline void Assembler::fdiv( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fdiv_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
inline void Assembler::fdivs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
inline void Assembler::fdivs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
inline void Assembler::frsp( FloatRegister d, FloatRegister b) { emit_int32( FRSP_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctid( FloatRegister d, FloatRegister b) { emit_int32( FCTID_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FCTIDZ_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
// PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE | frt(d) | frb(b) | rc(0)); }
inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
// Vector instructions for >= Power6.
inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvehx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvewx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvx( VectorRegister d, Register s1, Register s2) { emit_int32( LVX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvxl( VectorRegister d, Register s1, Register s2) { emit_int32( LVXL_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvebx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvehx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvewx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvx( VectorRegister d, Register s1, Register s2) { emit_int32( STVX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::stvxl( VectorRegister d, Register s1, Register s2) { emit_int32( STVXL_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvsl( VectorRegister d, Register s1, Register s2) { emit_int32( LVSL_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::lvsr( VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
inline void Assembler::vpkpx( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkswss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkshus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkswus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuhum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuwum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuhus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpkuwus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vupkhpx( VectorRegister d, VectorRegister b) { emit_int32( VUPKHPX_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupkhsb( VectorRegister d, VectorRegister b) { emit_int32( VUPKHSB_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupkhsh( VectorRegister d, VectorRegister b) { emit_int32( VUPKHSH_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklpx( VectorRegister d, VectorRegister b) { emit_int32( VUPKLPX_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklsb( VectorRegister d, VectorRegister b) { emit_int32( VUPKLSB_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vupklsh( VectorRegister d, VectorRegister b) { emit_int32( VUPKLSH_OPCODE | vrt(d) | vrb(b)); }
inline void Assembler::vmrghb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrghw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrghh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmrglh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsplt( VectorRegister d, int ui4, VectorRegister b) { emit_int32( VSPLT_OPCODE | vrt(d) | vsplt_uim(uimm(ui4,4)) | vrb(b)); }
inline void Assembler::vsplth( VectorRegister d, int ui3, VectorRegister b) { emit_int32( VSPLTH_OPCODE | vrt(d) | vsplt_uim(uimm(ui3,3)) | vrb(b)); }
inline void Assembler::vspltw( VectorRegister d, int ui2, VectorRegister b) { emit_int32( VSPLTW_OPCODE | vrt(d) | vsplt_uim(uimm(ui2,2)) | vrb(b)); }
inline void Assembler::vspltisb(VectorRegister d, int si5) { emit_int32( VSPLTISB_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
inline void Assembler::vspltish(VectorRegister d, int si5) { emit_int32( VSPLTISH_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
inline void Assembler::vspltisw(VectorRegister d, int si5) { emit_int32( VSPLTISW_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
inline void Assembler::vperm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsel( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
inline void Assembler::vsl( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSL_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsldoi( VectorRegister d, VectorRegister a, VectorRegister b, int si4) { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
inline void Assembler::vslo( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsr( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsro( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRO_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsububm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsububs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsubuhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulesb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuleub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulesh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuleuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulosb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmuloub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulosh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmulouh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmhaddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMHADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmhraddshs(VectorRegister d,VectorRegister a,VectorRegister b, VectorRegister c) { emit_int32( VMHRADDSHS_OPCODE| vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmladduhm(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMLADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsubuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUBUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsummbm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMMBM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumshm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vmsumuhs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
inline void Assembler::vsumsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUMSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum2sws(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM2SWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4sbs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4ubs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4UBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsum4shs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgsb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgsw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgsh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavgub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavguw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vavguh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxsh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vmaxuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminsh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vminuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vcmpequb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtsw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtub(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtuh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpgtuw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
inline void Assembler::vcmpequb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpequh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpequw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
inline void Assembler::vand( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vandc( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vnor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vxor( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vrlh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vslb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vslw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLW_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vslh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrab( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsraw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsrah( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::mtvscr( VectorRegister b) { emit_int32( MTVSCR_OPCODE | vrb(b)); }
inline void Assembler::mfvscr( VectorRegister d) { emit_int32( MFVSCR_OPCODE | vrt(d)); }
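// A standalone sketch (assumption: simplified demo, not HotSpot code) of the bit
// arithmetic behind the emit_int32(OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c))
// pattern above. In Power ISA VA-form instructions, VRT occupies bits 6-10,
// VRA 11-15, VRB 16-20 and VRC 21-25 in IBM MSB-first numbering, i.e. left
// shifts of 21, 16, 11 and 6 in conventional LSB-0 notation.
#include <cstdint>
#include <cstdio>

static uint32_t demo_vrt(unsigned r) { return (r & 31u) << 21; }
static uint32_t demo_vra(unsigned r) { return (r & 31u) << 16; }
static uint32_t demo_vrb(unsigned r) { return (r & 31u) << 11; }
static uint32_t demo_vrc(unsigned r) { return (r & 31u) <<  6; }

int main() {
  // Hypothetical VA-form opcode: primary opcode 4 plus a placeholder extended opcode.
  const uint32_t DEMO_VA_OPCODE = (4u << 26) | 32u;
  uint32_t insn = DEMO_VA_OPCODE | demo_vrt(1) | demo_vra(2) | demo_vrb(3) | demo_vrc(4);
  printf("0x%08x\n", insn); // every PPC instruction is one 32-bit word
  return 0;
}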
// ra0 version
inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lwa( Register d, int si16 ) { emit_int32( LWA_OPCODE | rt(d) | ds(si16));}
inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lhz( Register d, int si16 ) { emit_int32( LHZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lha( Register d, int si16 ) { emit_int32( LHA_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}
inline void Assembler::ldx( Register d, Register s2) { emit_int32( LDX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stw( Register d, int si16 ) { emit_int32( STW_OPCODE | rs(d) | d1(si16));}
inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::sth( Register d, int si16 ) { emit_int32( STH_OPCODE | rs(d) | d1(si16));}
inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
inline void Assembler::stb( Register d, int si16 ) { emit_int32( STB_OPCODE | rs(d) | d1(si16));}
inline void Assembler::std( Register d, int si16 ) { emit_int32( STD_OPCODE | rs(d) | ds(si16));}
inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
// ra0 version
inline void Assembler::icbi( Register s2) { emit_int32( ICBI_OPCODE | rb(s2) ); }
//inline void Assembler::dcba( Register s2) { emit_int32( DCBA_OPCODE | rb(s2) ); }
inline void Assembler::dcbz( Register s2) { emit_int32( DCBZ_OPCODE | rb(s2) ); }
inline void Assembler::dcbst( Register s2) { emit_int32( DCBST_OPCODE | rb(s2) ); }
inline void Assembler::dcbf( Register s2) { emit_int32( DCBF_OPCODE | rb(s2) ); }
inline void Assembler::dcbt( Register s2) { emit_int32( DCBT_OPCODE | rb(s2) ); }
inline void Assembler::dcbtct( Register s2, int ct) { emit_int32( DCBT_OPCODE | rb(s2) | thct(ct)); }
inline void Assembler::dcbtds( Register s2, int ds) { emit_int32( DCBT_OPCODE | rb(s2) | thds(ds)); }
inline void Assembler::dcbtst( Register s2) { emit_int32( DCBTST_OPCODE | rb(s2) ); }
inline void Assembler::dcbtstct(Register s2, int ct) { emit_int32( DCBTST_OPCODE | rb(s2) | thct(ct)); }
// ra0 version
inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1) { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1) { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
inline void Assembler::stwcx_(Register s, Register b) { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
inline void Assembler::stdcx_(Register s, Register b) { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
// ra0 version
inline void Assembler::lfs( FloatRegister d, int si16) { emit_int32( LFS_OPCODE | frt(d) | simm(si16,16)); }
inline void Assembler::lfsx(FloatRegister d, Register b) { emit_int32( LFSX_OPCODE | frt(d) | rb(b)); }
inline void Assembler::lfd( FloatRegister d, int si16) { emit_int32( LFD_OPCODE | frt(d) | simm(si16,16)); }
inline void Assembler::lfdx(FloatRegister d, Register b) { emit_int32( LFDX_OPCODE | frt(d) | rb(b)); }
// ra0 version
inline void Assembler::stfs( FloatRegister s, int si16) { emit_int32( STFS_OPCODE | frs(s) | simm(si16, 16)); }
inline void Assembler::stfsx(FloatRegister s, Register b) { emit_int32( STFSX_OPCODE | frs(s) | rb(b)); }
inline void Assembler::stfd( FloatRegister s, int si16) { emit_int32( STFD_OPCODE | frs(s) | simm(si16, 16)); }
inline void Assembler::stfdx(FloatRegister s, Register b) { emit_int32( STFDX_OPCODE | frs(s) | rb(b)); }
// ra0 version
inline void Assembler::lvebx( VectorRegister d, Register s2) { emit_int32( LVEBX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvehx( VectorRegister d, Register s2) { emit_int32( LVEHX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvewx( VectorRegister d, Register s2) { emit_int32( LVEWX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvx( VectorRegister d, Register s2) { emit_int32( LVX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvxl( VectorRegister d, Register s2) { emit_int32( LVXL_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvebx(VectorRegister d, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvehx(VectorRegister d, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvewx(VectorRegister d, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvx( VectorRegister d, Register s2) { emit_int32( STVX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvsl( VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::lvsr( VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::load_const(Register d, void* x, Register tmp) {
load_const(d, (long)x, tmp);
}
// Load a 64 bit constant encoded by a `Label'. This works for bound
// labels as well as unbound ones. For unbound labels, the code will
// be patched as soon as the label gets bound.
inline void Assembler::load_const(Register d, Label& L, Register tmp) {
load_const(d, target(L), tmp);
}
// Load a 64 bit constant encoded by an AddressLiteral. Patchable.
inline void Assembler::load_const(Register d, AddressLiteral& a, Register tmp) {
assert(d != R0, "R0 not allowed");
// First relocate (we don't change the offset in the RelocationHolder,
// just pass a.rspec()), then delegate to load_const(Register, long).
relocate(a.rspec());
load_const(d, (long)a.value(), tmp);
}
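// A standalone sketch (assumption: simplified demo, not the HotSpot
// implementation) of what load_const has to do: PPC immediates are only 16
// bits wide, so a 64-bit constant is split into four halfwords and
// materialized with a fixed-length lis/ori/sldi/oris/ori sequence. The fixed
// length is what keeps the sequence patchable once an unbound label resolves.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t x = 0x123456789abcdef0ULL;
  uint16_t d48 = (uint16_t)(x >> 48); // lis  d, d48
  uint16_t d32 = (uint16_t)(x >> 32); // ori  d, d, d32
                                      // sldi d, d, 32
  uint16_t d16 = (uint16_t)(x >> 16); // oris d, d, d16
  uint16_t d0  = (uint16_t) x;        // ori  d, d, d0
  printf("lis 0x%x; ori 0x%x; sldi 32; oris 0x%x; ori 0x%x\n", d48, d32, d16, d0);
  return 0;
}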
#endif // CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
// Platform specific for C++ based Interpreter
#define LOTS_OF_REGS /* Lets interpreter use plenty of registers */
private:
// Save the bottom of the stack after frame manager setup, for ease of
// restoration after return from a recursive interpreter call.
intptr_t* _frame_bottom; // Saved bottom of frame manager frame.
address _last_Java_pc; // Pc to return to in frame manager.
intptr_t* _last_Java_fp; // frame pointer
intptr_t* _last_Java_sp; // stack pointer
interpreterState _self_link; // Previous interpreter state // sometimes points to self???
double _native_fresult; // Save result of native calls that might return floats.
intptr_t _native_lresult; // Save result of native calls that might return handle/longs.
public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t* last_Java_fp(void) { return _last_Java_fp; }
static ByteSize native_lresult_offset() {
return byte_offset_of(BytecodeInterpreter, _native_lresult);
}
static ByteSize native_fresult_offset() {
return byte_offset_of(BytecodeInterpreter, _native_fresult);
}
static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp);
#define SET_LAST_JAVA_FRAME() THREAD->frame_anchor()->set(istate->_last_Java_sp, istate->_last_Java_pc);
#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->clear();
// Macros for accessing the stack.
#undef STACK_INT
#undef STACK_FLOAT
#undef STACK_ADDR
#undef STACK_OBJECT
#undef STACK_DOUBLE
#undef STACK_LONG
// JavaStack Implementation
#define STACK_SLOT(offset) ((address) &topOfStack[-(offset)])
#define STACK_INT(offset) (*((jint*) &topOfStack[-(offset)]))
#define STACK_FLOAT(offset) (*((jfloat *) &topOfStack[-(offset)]))
#define STACK_OBJECT(offset) (*((oop *) &topOfStack [-(offset)]))
#define STACK_DOUBLE(offset) (((VMJavaVal64*) &topOfStack[-(offset)])->d)
#define STACK_LONG(offset) (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
#define SET_STACK_SLOT(value, offset) (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
#define SET_STACK_ADDR(value, offset) (*((address *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_INT(value, offset) (*((jint *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_FLOAT(value, offset) (*((jfloat *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = \
((VMJavaVal64*)(addr))->d)
#define SET_STACK_LONG(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
#define SET_STACK_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = \
((VMJavaVal64*)(addr))->l)
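// A standalone illustration (assumption: simplified demo, not HotSpot code)
// of the addressing convention behind these macros: topOfStack points one
// slot below the live top of the expression stack, and the stack grows
// towards lower addresses, so offset -1 is the top element and -2 the one
// beneath it.
#include <cstdint>
#include <cstdio>
typedef int32_t jint;
#define DEMO_STACK_INT(offset)            (*((jint*)&topOfStack[-(offset)]))
#define DEMO_SET_STACK_INT(value, offset) (*((jint*)&topOfStack[-(offset)]) = (value))

int main() {
  intptr_t slots[8];
  intptr_t* topOfStack = &slots[4];
  DEMO_SET_STACK_INT(3, -2);  // pushed first, sits deeper (higher address)
  DEMO_SET_STACK_INT(2, -1);  // top of stack
  // iadd-style combine: the result replaces the deeper slot; the interpreter
  // loop would then shrink the stack by one slot.
  DEMO_SET_STACK_INT(DEMO_STACK_INT(-2) + DEMO_STACK_INT(-1), -2);
  printf("%d\n", DEMO_STACK_INT(-2)); // prints 5
  return 0;
}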
// JavaLocals implementation
#define LOCALS_SLOT(offset) ((intptr_t*)&locals[-(offset)])
#define LOCALS_ADDR(offset) ((address)locals[-(offset)])
#define LOCALS_INT(offset) (*(jint*)&(locals[-(offset)]))
#define LOCALS_OBJECT(offset) (cast_to_oop(locals[-(offset)]))
#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
#define SET_LOCALS_SLOT(value, offset) (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
#define SET_LOCALS_INT(value, offset) (*((jint *)&locals[-(offset)]) = (value))
#define SET_LOCALS_DOUBLE(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
#define SET_LOCALS_LONG(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
((VMJavaVal64*)(addr))->d)
#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP

View File

@ -0,0 +1,290 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
#ifdef CC_INTERP
// Inline interpreter functions for ppc.
#include <math.h>
inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return (jfloat)fmod((double)op1, (double)op2); }
inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
return ( op1 < op2 ? -1 :
op1 > op2 ? 1 :
op1 == op2 ? 0 :
(direction == -1 || direction == 1) ? direction : 0);
}
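// A standalone sketch (assumption: simplified demo, not HotSpot code) of what
// the direction parameter encodes: Java has two float-compare bytecodes that
// differ only in how they order NaN; fcmpl pushes -1 and fcmpg pushes +1, so
// NaN operands fall through the three ordered cases to `direction`.
#include <cmath>
#include <cstdio>

static int demo_fcompare(float op1, float op2, int direction) {
  return op1 < op2 ? -1 : op1 > op2 ? 1 : op1 == op2 ? 0 : direction;
}

int main() {
  printf("%d %d\n", demo_fcompare(NAN, 1.0f, -1),  // fcmpl semantics: -1
                    demo_fcompare(NAN, 1.0f,  1)); // fcmpg semantics: +1
  return 0;
}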
inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
to[0] = from[0]; to[1] = from[1];
}
// The long operations depend on compiler support for "long long" on ppc.
inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
return op1 + op2;
}
inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
return op1 & op2;
}
inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
if (op1 == min_jlong && op2 == -1) return op1;
return op1 / op2;
}
inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
return op1 * op2;
}
inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
return op1 | op2;
}
inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
return op1 - op2;
}
inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
return op1 ^ op2;
}
inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
if (op1 == min_jlong && op2 == -1) return 0;
return op1 % op2;
}
inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
return ((uint64_t) op1) >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
return op1 >> (op2 & 0x3F);
}
inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
return op1 << (op2 & 0x3F);
}
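// A standalone illustration (assumption: simplified demo, not HotSpot code)
// of why the shift count is masked: Java defines long shifts to use only the
// low six bits of the count, while an over-wide shift is undefined in C++.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t op1 = 1;
  int32_t op2 = 65; // Java: 1L << 65 == 1L << (65 & 0x3F) == 1L << 1 == 2
  printf("%lld\n", (long long)(op1 << (op2 & 0x3F)));
  return 0;
}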
inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
return -op;
}
inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
return ~op;
}
inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
return (op <= 0);
}
inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
return (op >= 0);
}
inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
return (op == 0);
}
inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
return (op1 == op2);
}
inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
return (op1 != op2);
}
inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
return (op1 >= op2);
}
inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
return (op1 <= op2);
}
inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
return (op1 < op2);
}
inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
return (op1 > op2);
}
inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
}
// Long conversions
inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
return (jdouble) val;
}
inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
return (jfloat) val;
}
inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
return (jint) val;
}
// Double Arithmetic
inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
return op1 + op2;
}
inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
return op1 / op2;
}
inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
return op1 * op2;
}
inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
return -op;
}
inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
return fmod(op1, op2);
}
inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
return op1 - op2;
}
inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
return ( op1 < op2 ? -1 :
op1 > op2 ? 1 :
op1 == op2 ? 0 :
(direction == -1 || direction == 1) ? direction : 0);
}
// Double Conversions
inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
return (jfloat) val;
}
// Float Conversions
inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
return (jdouble) op;
}
// Integer Arithmetic
inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
return op1 + op2;
}
inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
return op1 & op2;
}
inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
else return op1 / op2;
}
inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
return op1 * op2;
}
inline jint BytecodeInterpreter::VMintNeg(jint op) {
return -op;
}
inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
return op1 | op2;
}
inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
/* it's possible we could catch this special case implicitly */
if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
else return op1 % op2;
}
inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
return op1 << (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
return op1 >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
return op1 - op2;
}
inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
return ((juint) op1) >> (op2 & 0x1f);
}
inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
return op1 ^ op2;
}
inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
return (jdouble) val;
}
inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
return (jfloat) val;
}
inline jlong BytecodeInterpreter::VMint2Long(jint val) {
return (jlong) val;
}
inline jchar BytecodeInterpreter::VMint2Char(jint val) {
return (jchar) val;
}
inline jshort BytecodeInterpreter::VMint2Short(jint val) {
return (jshort) val;
}
inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
return (jbyte) val;
}
#endif // CC_INTERP
#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/bytecodes.hpp"
void Bytecodes::pd_initialize() {
// No ppc specific initialization.
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTECODES_PPC_HPP
#define CPU_PPC_VM_BYTECODES_PPC_HPP
// No ppc64 specific bytecodes
#endif // CPU_PPC_VM_BYTECODES_PPC_HPP

View File

@ -0,0 +1,155 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_BYTES_PPC_HPP
#define CPU_PPC_VM_BYTES_PPC_HPP
#include "memory/allocation.hpp"
class Bytes: AllStatic {
public:
// Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
// PowerPC needs to check for alignment.
// Can I count on address always being a pointer to an unsigned char? Yes.
// Returns true if the byte ordering used by Java is different from the native byte ordering
// of the underlying machine. For example, true for Intel x86, false for Solaris on Sparc.
static inline bool is_Java_byte_ordering_different() { return false; }
// Thus, a swap between native and Java ordering is always a no-op:
static inline u2 swap_u2(u2 x) { return x; }
static inline u4 swap_u4(u4 x) { return x; }
static inline u8 swap_u8(u8 x) { return x; }
static inline u2 get_native_u2(address p) {
return (intptr_t(p) & 1) == 0
? *(u2*)p
: ( u2(p[0]) << 8 )
| ( u2(p[1]) );
}
static inline u4 get_native_u4(address p) {
switch (intptr_t(p) & 3) {
case 0: return *(u4*)p;
case 2: return ( u4( ((u2*)p)[0] ) << 16 )
| ( u4( ((u2*)p)[1] ) );
default: return ( u4(p[0]) << 24 )
| ( u4(p[1]) << 16 )
| ( u4(p[2]) << 8 )
| u4(p[3]);
}
}
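// A standalone sketch (assumption: simplified demo, not HotSpot code) of the
// same alignment-dispatch idea for a 32-bit value. The aligned fast path is a
// single native load, which is big-endian on PPC; the unaligned path builds
// the value byte by byte so it works at any address.
#include <cstdint>
#include <cstring>
#include <cstdio>

static uint32_t demo_get_be_u4(const unsigned char* p) {
  if (((uintptr_t)p & 3) == 0) {
    uint32_t v;
    memcpy(&v, p, 4); // aligned: one native load (big-endian on PPC)
    return v;
  }
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
         ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

int main() {
  unsigned char buf[8] = { 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 0 };
  printf("0x%08x\n", demo_get_be_u4(buf + 1)); // unaligned: 0x12345678
  return 0;
}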
static inline u8 get_native_u8(address p) {
switch (intptr_t(p) & 7) {
case 0: return *(u8*)p;
case 4: return ( u8( ((u4*)p)[0] ) << 32 )
| ( u8( ((u4*)p)[1] ) );
case 2: return ( u8( ((u2*)p)[0] ) << 48 )
| ( u8( ((u2*)p)[1] ) << 32 )
| ( u8( ((u2*)p)[2] ) << 16 )
| ( u8( ((u2*)p)[3] ) );
default: return ( u8(p[0]) << 56 )
| ( u8(p[1]) << 48 )
| ( u8(p[2]) << 40 )
| ( u8(p[3]) << 32 )
| ( u8(p[4]) << 24 )
| ( u8(p[5]) << 16 )
| ( u8(p[6]) << 8 )
| u8(p[7]);
}
}
static inline void put_native_u2(address p, u2 x) {
if ( (intptr_t(p) & 1) == 0 ) { *(u2*)p = x; }
else {
p[0] = x >> 8;
p[1] = x;
}
}
static inline void put_native_u4(address p, u4 x) {
switch ( intptr_t(p) & 3 ) {
case 0: *(u4*)p = x;
break;
case 2: ((u2*)p)[0] = x >> 16;
((u2*)p)[1] = x;
break;
default: ((u1*)p)[0] = x >> 24;
((u1*)p)[1] = x >> 16;
((u1*)p)[2] = x >> 8;
((u1*)p)[3] = x;
break;
}
}
static inline void put_native_u8(address p, u8 x) {
switch ( intptr_t(p) & 7 ) {
case 0: *(u8*)p = x;
break;
case 4: ((u4*)p)[0] = x >> 32;
((u4*)p)[1] = x;
break;
case 2: ((u2*)p)[0] = x >> 48;
((u2*)p)[1] = x >> 32;
((u2*)p)[2] = x >> 16;
((u2*)p)[3] = x;
break;
default: ((u1*)p)[0] = x >> 56;
((u1*)p)[1] = x >> 48;
((u1*)p)[2] = x >> 40;
((u1*)p)[3] = x >> 32;
((u1*)p)[4] = x >> 24;
((u1*)p)[5] = x >> 16;
((u1*)p)[6] = x >> 8;
((u1*)p)[7] = x;
}
}
// Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
// (no byte-order reversal is needed since Power CPUs are big-endian).
static inline u2 get_Java_u2(address p) { return get_native_u2(p); }
static inline u4 get_Java_u4(address p) { return get_native_u4(p); }
static inline u8 get_Java_u8(address p) { return get_native_u8(p); }
static inline void put_Java_u2(address p, u2 x) { put_native_u2(p, x); }
static inline void put_Java_u4(address p, u4 x) { put_native_u4(p, x); }
static inline void put_Java_u8(address p, u8 x) { put_native_u8(p, x); }
};
#endif // CPU_PPC_VM_BYTES_PPC_HPP

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_C2_GLOBALS_PPC_HPP
#define CPU_PPC_VM_C2_GLOBALS_PPC_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp).
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, CICompileOSR, true);
define_pd_global(bool, InlineIntrinsics, true);
define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
define_pd_global(bool, TieredCompilation, false);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 140000);
define_pd_global(intx, OnStackReplacePercentage, 140);
define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FLOATPRESSURE, 28);
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, INTPRESSURE, 25);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, RegisterCostAreaRatio, 16000);
define_pd_global(bool, UseTLAB, true);
define_pd_global(bool, ResizeTLAB, true);
define_pd_global(intx, LoopUnrollLimit, 60);
// Peephole and CISC spilling both break the graph, and so make the
// scheduler sick.
define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.
// -Djava.io.tmpdir=./tmp -jar SPECjvm2008.jar -ikv -wt 3 -it 3
// -bt 1 --base compiler.sunflow
// fails in Lower.visitIf->translate->translate->translate and
// throws an unexpected NPE. A load and a store seem to be
// reordered. The Java source reads roughly:
// loc = x.f
// x.f = 0
// NullCheck loc
// While the generated assembly reads:
// x.f = 0
// loc = x.f
// NullCheck loc
define_pd_global(bool, OptoScheduling, false);
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 256*M);
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, TrapBasedRangeChecks, false);
// Heap related flags
define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
#endif // CPU_PPC_VM_C2_GLOBALS_PPC_HPP

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
// processor dependent initialization for ppc
void Compile::pd_compiler2_init() {
// Power7 and later
if (PowerArchitecturePPC64 > 6) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_ERGO(bool, UsePopCountInstruction, true);
}
}
if (PowerArchitecturePPC64 == 6) {
if (FLAG_IS_DEFAULT(InsertEndGroupPPC64)) {
FLAG_SET_ERGO(bool, InsertEndGroupPPC64, true);
}
}
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CODEBUFFER_PPC_HPP
#define CPU_PPC_VM_CODEBUFFER_PPC_HPP
private:
void pd_initialize() {}
public:
void flush_bundle(bool start_new_bundle) {}
#endif // CPU_PPC_VM_CODEBUFFER_PPC_HPP

View File

@ -0,0 +1,261 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif
// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
if (is_icholder_entry(call->destination())) {
NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
}
}
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
// This call site might have become stale so inspect it carefully.
NativeCall* call = nativeCall_at(call_site->addr());
return is_icholder_entry(call->destination());
}
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
: _ic_call(call)
{
address ic_call = call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
// Search for the ic_call at the given address.
RelocIterator iter(nm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
if (iter.type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter.virtual_call_reloc();
_is_optimized = false;
_value = nativeMovConstReg_at(r->cached_value());
} else {
assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = NULL;
}
}
// ----------------------------------------------------------------------------
// A PPC CompiledStaticCall looks like this:
//
// >>>> consts
//
// [call target1]
// [IC cache]
// [call target2]
//
// <<<< consts
// >>>> insts
//
// bl offset16 -+ -+ ??? // How many bits available?
// | |
// <<<< insts | |
// >>>> stubs | |
// | |- trampoline_stub_Reloc
// trampoline stub: | <-+
// r2 = toc |
// r2 = [r2 + offset] | // Load call target1 from const section
// mtctr r2 |
// bctr |- static_stub_Reloc
// comp_to_interp_stub: <---+
// r1 = toc
// ICreg = [r1 + IC_offset] // Load IC from const section
// r1 = [r1 + offset] // Load call target2 from const section
// mtctr r1
// bctr
//
// <<<< stubs
//
// The call instruction in the code either
// - branches directly to a compiled method if offset encodable in instruction
// - branches to the trampoline stub if offset to compiled method not encodable
// - branches to the compiled_to_interp stub if target interpreted
//
// Further there are three relocations from the loads to the constants in
// the constant section.
//
// The use of r1 and r2 in the stubs makes it possible to distinguish them.
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
// Get the mark within main instrs section which is set to the address of the call.
address call_addr = cbuf.insts_mark();
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
// Start the stub.
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
if (stub == NULL) {
Compile::current()->env()->record_out_of_memory_failure();
return;
}
// For java_to_interp stubs we use R11_scratch1 as scratch register
// and in call trampoline stubs we use R12_scratch2. This way we
// can distinguish them (see is_NativeCallTrampolineStub_at()).
Register reg_scratch = R11_scratch1;
// Create a static stub relocation which relates this stub
// with the call instruction at insts_call_instruction_offset in the
// instructions code-section.
__ relocate(static_stub_Relocation::spec(call_addr));
const int stub_start_offset = __ offset();
// Now, create the stub's code:
// - load the TOC
// - load the inline cache oop from the constant pool
// - load the call target from the constant pool
// - call
__ calculate_address_from_global_toc(reg_scratch, __ method_toc());
AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
__ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
if (ReoptimizeCallSequences) {
__ b64_patchable((address)-1, relocInfo::none);
} else {
AddressLiteral a((address)-1);
__ load_const_from_method_toc(reg_scratch, a, reg_scratch);
__ mtctr(reg_scratch);
__ bctr();
}
// FIXME: Assert that the stub can be identified and patched.
// Java_to_interp_stub_size should be good.
assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
"should be good size");
assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
"must not confuse java_to_interp with trampoline stubs");
// End the stub.
__ end_a_stub();
#else
ShouldNotReachHere();
#endif
}
#undef __
// Size of the java_to_interp stub. This doesn't need to be accurate, but it
// must be larger than or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::to_interp_stub_size() {
return 12 * BytesPerInstWord;
}
// Relocation entries for call stub, compiled java to interpreter.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::reloc_to_interp_stub() {
return 5;
}
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
instruction_address(),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_holder->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
method_holder->set_data(0);
jump->set_jump_destination((address)-1);
}
//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT
void CompiledStaticCall::verify() {
// Verify call.
NativeCall::verify();
if (os::is_MP()) {
verify_alignment();
}
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT

View File

@ -0,0 +1,171 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_COPY_PPC_HPP
#define CPU_PPC_VM_COPY_PPC_HPP
#ifndef PPC64
#error "copy currently only implemented for PPC64"
#endif
// Inline functions for memory copy and fill.
static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
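  // Each case intentionally falls through: for a given count, words
  // count-1 down to 0 are copied individually.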
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default: (void)memcpy(to, from, count * HeapWordSize);
break;
}
}
static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default: while (count-- > 0) {
*to++ = *from++;
}
break;
}
}
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
(void)memmove(to, from, count * HeapWordSize);
}
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
// Template for atomic, element-wise copy.
template <class T>
static void copy_conjoint_atomic(T* from, T* to, size_t count) {
if (from > to) {
while (count-- > 0) {
// Copy forwards
*to++ = *from++;
}
} else {
from += count - 1;
to += count - 1;
while (count-- > 0) {
// Copy backwards
*to-- = *from--;
}
}
}
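// A standalone illustration (assumption: simplified demo, not HotSpot code)
// of why the copy direction above matters: copying forwards into an
// overlapping destination that starts later would overwrite source elements
// before they are read.
#include <cstddef>
#include <cstdio>

template <class T>
static void demo_copy_conjoint(T* from, T* to, size_t count) {
  if (from > to) {
    while (count-- > 0) *to++ = *from++;   // forwards is safe
  } else {
    from += count - 1;
    to   += count - 1;
    while (count-- > 0) *to-- = *from--;   // backwards avoids clobbering
  }
}

int main() {
  int a[5] = { 1, 2, 3, 4, 5 };
  demo_copy_conjoint(a, a + 1, (size_t)4); // overlapping shift right by one
  for (int v : a) printf("%d ", v);        // prints: 1 1 2 3 4
  printf("\n");
  return 0;
}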
static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
// TODO: contribute optimized version.
copy_conjoint_atomic<jshort>(from, to, count);
}
static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
// TODO: contribute optimized version.
copy_conjoint_atomic<jint>(from, to, count);
}
static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
copy_conjoint_atomic<jlong>(from, to, count);
}
static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
copy_conjoint_atomic<oop>(from, to, count);
}
static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_bytes_atomic(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
// TODO: contribute optimized version.
pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
}
static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
// TODO: contribute optimized version.
pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
}
static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
}
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
julong* to = (julong*)tohw;
julong v = ((julong)value << 32) | value;
while (count-- > 0) {
*to++ = v;
}
}
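// A standalone illustration (assumption: simplified demo, not HotSpot code)
// of the fill trick above: duplicating the 32-bit value into both halves of
// a 64-bit word lets each store write the pattern twice per heap word.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t value = 0xdeadbeefu;
  uint64_t v = ((uint64_t)value << 32) | value;
  printf("0x%016llx\n", (unsigned long long)v); // 0xdeadbeefdeadbeef
  return 0;
}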
static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
pd_fill_to_words(tohw, count, value);
}
static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
(void)memset(to, value, count);
}
static void pd_zero_to_words(HeapWord* tohw, size_t count) {
pd_fill_to_words(tohw, count, 0);
}
static void pd_zero_to_bytes(void* to, size_t count) {
(void)memset(to, 0, count);
}
#endif // CPU_PPC_VM_COPY_PPC_HPP

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
address generate_normal_entry(void);
address generate_native_entry(void);
void lock_method(void);
void unlock_method(void);
void generate_counter_incr(Label& overflow);
void generate_counter_overflow(Label& do_continue);
void generate_more_monitors();
void generate_deopt_handling(Register result_index);
void generate_compute_interpreter_state(Label& exception_return);
#endif // CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP

File diff suppressed because it is too large

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
protected:
// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
const static int InterpreterCodeSize = 12*K;
#endif // CPU_PPC_VM_CPPINTERPRETER_PPC_HPP

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "runtime/frame.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/top.hpp"
void pd_ps(frame f) {}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_DEPCHECKER_PPC_HPP
#define CPU_PPC_VM_DEPCHECKER_PPC_HPP
// Nothing to do on ppc64
#endif // CPU_PPC_VM_DEPCHECKER_PPC_HPP

View File

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_DISASSEMBLER_PPC_HPP
#define CPU_PPC_VM_DISASSEMBLER_PPC_HPP
static int pd_instruction_alignment() {
return sizeof(int);
}
static const char* pd_cpu_opts() {
return "ppc64";
}
#endif // CPU_PPC_VM_DISASSEMBLER_PPC_HPP

View File

@@ -0,0 +1,304 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif // ASSERT
bool frame::safe_for_sender(JavaThread *thread) {
bool safe = false;
address cursp = (address)sp();
address curfp = (address)fp();
if ((cursp != NULL && curfp != NULL &&
(cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
(curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
safe = true;
}
return safe;
}
bool frame::is_interpreted_frame() const {
return Interpreter::contains(pc());
}
frame frame::sender_for_entry_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
// Java frame called from C; skip all C frames and return top C
// frame of that chunk as the sender.
JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
assert(!entry_frame_is_first(), "next Java fp must be non zero");
assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
map->clear();
assert(map->include_argument_oops(), "should be set by clear");
if (jfa->last_Java_pc() != NULL) {
frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
return fr;
}
// last_Java_pc is not set if we come here from compiled code. The
// constructor retrieves the PC from the stack.
frame fr(jfa->last_Java_sp());
return fr;
}
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// Pass the caller's initial_caller_sp as unextended_sp.
return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
}
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
assert(map != NULL, "map must be set");
// Frame owned by compiler.
address pc = *compiled_sender_pc_addr(_cb);
frame caller(compiled_sender_sp(_cb), pc);
// Now adjust the map.
// Get the rest.
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
if (_cb->oop_maps() != NULL) {
OopMapSet::update_register_map(this, map);
}
}
return caller;
}
intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
return sender_sp();
}
address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
return sender_pc_addr();
}
frame frame::sender(RegisterMap* map) const {
// Default is we don't have to follow them. The sender_for_xxx will
// update it accordingly.
map->set_include_argument_oops(false);
if (is_entry_frame()) return sender_for_entry_frame(map);
if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
if (_cb != NULL) {
return sender_for_compiled_frame(map);
}
// Must be native-compiled frame, i.e. the marshaling code for native
// methods that exists in the core system.
return frame(sender_sp(), sender_pc());
}
void frame::patch_pc(Thread* thread, address pc) {
if (TracePcPatching) {
tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
&((address*) _sp)[-1], ((address*) _sp)[-1], pc);
}
own_abi()->lr = (uint64_t)pc;
_cb = CodeCache::find_blob(pc);
if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
address orig = (((nmethod*)_cb)->get_original_pc(this));
assert(orig == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;
// Leave _pc as is.
} else {
_deopt_state = not_deoptimized;
_pc = pc;
}
}
void frame::pd_gc_epilog() {
if (is_interpreted_frame()) {
// Set constant pool cache entry for interpreter.
Method* m = interpreter_frame_method();
*interpreter_frame_cpoolcache_addr() = m->constants()->cache();
}
}
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// Is there anything to do?
assert(is_interpreted_frame(), "Not an interpreted frame");
return true;
}
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
assert(is_interpreted_frame(), "interpreted frame expected");
Method* method = interpreter_frame_method();
BasicType type = method->result_type();
if (method->is_native()) {
// Prior to calling into the runtime to notify of the method exit, the possible
// result value is saved into the interpreter frame.
#ifdef CC_INTERP
interpreterState istate = get_interpreterState();
address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
#endif
switch (method->result_type()) {
case T_OBJECT:
case T_ARRAY: {
oop* obj_p = *(oop**)lresult;
oop obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
*oop_result = obj;
break;
}
// We use std/stfd to store the values.
case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
case T_INT : value_result->i = (jint) *(long*)lresult; break;
case T_CHAR : value_result->c = (jchar) *(unsigned long*)lresult; break;
case T_SHORT : value_result->s = (jshort) *(long*)lresult; break;
case T_BYTE    : value_result->b = (jbyte) *(long*)lresult;           break;
case T_LONG : value_result->j = (jlong) *(long*)lresult; break;
case T_FLOAT : value_result->f = (jfloat) *(double*)fresult; break;
case T_DOUBLE : value_result->d = (jdouble) *(double*)fresult; break;
case T_VOID : /* Nothing to do */ break;
default : ShouldNotReachHere();
}
} else {
intptr_t* tos_addr = interpreter_frame_tos_address();
switch (method->result_type()) {
case T_OBJECT:
case T_ARRAY: {
oop obj = *(oop*)tos_addr;
assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
*oop_result = obj;
break;
}
case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
case T_BYTE : value_result->b = (jbyte) *(jint*)tos_addr; break;
case T_CHAR : value_result->c = (jchar) *(jint*)tos_addr; break;
case T_SHORT : value_result->s = (jshort) *(jint*)tos_addr; break;
case T_INT : value_result->i = *(jint*)tos_addr; break;
case T_LONG : value_result->j = *(jlong*)tos_addr; break;
case T_FLOAT : value_result->f = *(jfloat*)tos_addr; break;
case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break;
case T_VOID : /* Nothing to do */ break;
default : ShouldNotReachHere();
}
}
return type;
}
#ifndef PRODUCT
void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) {
#ifdef CC_INTERP
interpreterState istate = get_interpreterState();
values.describe(frame_no, (intptr_t*)istate, "istate");
values.describe(frame_no, (intptr_t*)&(istate->_thread), " thread");
values.describe(frame_no, (intptr_t*)&(istate->_bcp), " bcp");
values.describe(frame_no, (intptr_t*)&(istate->_locals), " locals");
values.describe(frame_no, (intptr_t*)&(istate->_constants), " constants");
values.describe(frame_no, (intptr_t*)&(istate->_method), err_msg(" method = %s", istate->_method->name_and_sig_as_C_string()));
values.describe(frame_no, (intptr_t*)&(istate->_mdx), " mdx");
values.describe(frame_no, (intptr_t*)&(istate->_stack), " stack");
values.describe(frame_no, (intptr_t*)&(istate->_msg), err_msg(" msg = %s", BytecodeInterpreter::C_msg(istate->_msg)));
values.describe(frame_no, (intptr_t*)&(istate->_result), " result");
values.describe(frame_no, (intptr_t*)&(istate->_prev_link), " prev_link");
values.describe(frame_no, (intptr_t*)&(istate->_oop_temp), " oop_temp");
values.describe(frame_no, (intptr_t*)&(istate->_stack_base), " stack_base");
values.describe(frame_no, (intptr_t*)&(istate->_stack_limit), " stack_limit");
values.describe(frame_no, (intptr_t*)&(istate->_monitor_base), " monitor_base");
values.describe(frame_no, (intptr_t*)&(istate->_frame_bottom), " frame_bottom");
values.describe(frame_no, (intptr_t*)&(istate->_last_Java_pc), " last_Java_pc");
values.describe(frame_no, (intptr_t*)&(istate->_last_Java_fp), " last_Java_fp");
values.describe(frame_no, (intptr_t*)&(istate->_last_Java_sp), " last_Java_sp");
values.describe(frame_no, (intptr_t*)&(istate->_self_link), " self_link");
values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
Unimplemented();
#endif
}
}
#endif
void frame::adjust_unextended_sp() {
// If we are returning to a compiled MethodHandle call site, the
// saved_fp will in fact be a saved value of the unextended SP. The
// simplest way to tell whether we are returning to such a call site
// is as follows:
if (is_compiled_frame() && false /*is_at_mh_callsite()*/) { // TODO PPC port
// If the sender PC is a deoptimization point, get the original
// PC. For MethodHandle call site the unextended_sp is stored in
// saved_fp.
_unextended_sp = _fp - _cb->frame_size();
#ifdef ASSERT
nmethod *sender_nm = _cb->as_nmethod_or_null();
assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");
intptr_t* sp = _unextended_sp; // check if stack can be walked from here
for (int x = 0; x < 5; ++x) { // check up to a couple of backlinks
intptr_t* prev_sp = *(intptr_t**)sp;
if (prev_sp == 0) break; // end of stack
assert(prev_sp>sp, "broken stack");
sp = prev_sp;
}
if (sender_nm->is_deopt_mh_entry(_pc)) { // checks for deoptimization
address original_pc = sender_nm->get_original_pc(this);
assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
assert(sender_nm->is_method_handle_return(original_pc), "must be");
}
#endif
}
}
intptr_t *frame::initial_deoptimization_info() {
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}

View File

@@ -0,0 +1,448 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_FRAME_PPC_HPP
#define CPU_PPC_VM_FRAME_PPC_HPP
#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
// C frame layout on PPC-64.
//
// In this figure the stack grows upwards, while memory grows
// downwards. See "64-bit PowerPC ELF ABI Supplement Version 1.7",
// IBM Corp. (2003-10-29)
// (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf).
//
// Square brackets denote stack regions possibly larger
// than a single 64 bit slot.
//
// STACK:
// 0 [C_FRAME] <-- SP after prolog (mod 16 = 0)
// [C_FRAME] <-- SP before prolog
// ...
// [C_FRAME]
//
// C_FRAME:
// 0 [ABI_112]
// 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_10)
// ...
// 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
// local 1
// ...
// local N
// spill slot for vector reg (16 bytes aligned)
// ...
// spill slot for vector reg
// alignment (4 or 12 bytes)
// V SR_VRSAVE
// V+4 spill slot for GR
// ... ...
// spill slot for GR
// spill slot for FR
// ...
// spill slot for FR
//
// ABI_48:
// 0 caller's SP
// 8 space for condition register (CR) for next call
// 16 space for link register (LR) for next call
// 24 reserved
// 32 reserved
// 40 space for TOC (=R2) register for next call
//
// ABI_112:
// 0 [ABI_48]
// 48 CARG_1: spill slot for outgoing arg 1. used by next callee.
// ... ...
// 104 CARG_8: spill slot for outgoing arg 8. used by next callee.
//
public:
// C frame layout
enum {
// stack alignment
alignment_in_bytes = 16,
// log_2(16*8 bits) = 7.
log_2_of_alignment_in_bits = 7
};
// ABI_48:
struct abi_48 {
uint64_t callers_sp;
uint64_t cr; //_16
uint64_t lr;
uint64_t reserved1; //_16
uint64_t reserved2;
uint64_t toc; //_16
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
};
enum {
abi_48_size = sizeof(abi_48)
};
struct abi_112 : abi_48 {
uint64_t carg_1;
uint64_t carg_2; //_16
uint64_t carg_3;
uint64_t carg_4; //_16
uint64_t carg_5;
uint64_t carg_6; //_16
uint64_t carg_7;
uint64_t carg_8; //_16
// aligned to frame::alignment_in_bytes (16)
};
enum {
abi_112_size = sizeof(abi_112)
};
#define _abi(_component) \
(offset_of(frame::abi_112, _component))
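// A minimal sanity-check sketch (assumes a C++11 compiler for class-scope
// static_assert; the asserts below are illustrative, not load-bearing): the
// ABI structs above must match the fixed sizes their names imply.
static_assert(sizeof(abi_48)  ==  48, "abi_48 layout must be 48 bytes");
static_assert(sizeof(abi_112) == 112, "abi_112 layout must be 112 bytes");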
struct abi_112_spill : abi_112 {
// additional spill slots
uint64_t spill_ret;
uint64_t spill_fret; //_16
// aligned to frame::alignment_in_bytes (16)
};
enum {
abi_112_spill_size = sizeof(abi_112_spill)
};
#define _abi_112_spill(_component) \
(offset_of(frame::abi_112_spill, _component))
// non-volatile GPRs:
struct spill_nonvolatiles {
uint64_t r14;
uint64_t r15; //_16
uint64_t r16;
uint64_t r17; //_16
uint64_t r18;
uint64_t r19; //_16
uint64_t r20;
uint64_t r21; //_16
uint64_t r22;
uint64_t r23; //_16
uint64_t r24;
uint64_t r25; //_16
uint64_t r26;
uint64_t r27; //_16
uint64_t r28;
uint64_t r29; //_16
uint64_t r30;
uint64_t r31; //_16
double f14;
double f15;
double f16;
double f17;
double f18;
double f19;
double f20;
double f21;
double f22;
double f23;
double f24;
double f25;
double f26;
double f27;
double f28;
double f29;
double f30;
double f31;
// aligned to frame::alignment_in_bytes (16)
};
enum {
spill_nonvolatiles_size = sizeof(spill_nonvolatiles)
};
#define _spill_nonvolatiles_neg(_component) \
(int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))
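// Worked example (sketch, derived from the struct above): spill_nonvolatiles
// holds 18 GPRs plus 18 FPRs of 8 bytes each, so spill_nonvolatiles_size is
// 36 * 8 = 288 bytes, and _spill_nonvolatiles_neg(r15) evaluates to
// -288 + 8 = -280, i.e. 280 bytes below the frame pointer.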
// Frame layout for the Java interpreter on PPC64.
//
// This frame layout provides a C-like frame for every Java frame.
//
// In these figures the stack grows upwards, while memory grows
// downwards. Square brackets denote regions possibly larger than
// single 64 bit slots.
//
// STACK (no JNI, no compiled code, no library calls,
// interpreter-loop is active):
// 0 [InterpretMethod]
// [TOP_IJAVA_FRAME]
// [PARENT_IJAVA_FRAME]
// ...
// [PARENT_IJAVA_FRAME]
// [ENTRY_FRAME]
// [C_FRAME]
// ...
// [C_FRAME]
//
// TOP_IJAVA_FRAME:
// 0 [TOP_IJAVA_FRAME_ABI]
// alignment (optional)
// [operand stack]
// [monitors] (optional)
// [cInterpreter object]
// result, locals, and arguments are in parent frame!
//
// PARENT_IJAVA_FRAME:
// 0 [PARENT_IJAVA_FRAME_ABI]
// alignment (optional)
// [callee's Java result]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [used part of operand stack w/o arguments]
// [monitors] (optional)
// [cInterpreter object]
//
// ENTRY_FRAME:
// 0 [PARENT_IJAVA_FRAME_ABI]
// alignment (optional)
// [callee's Java result]
// [callee's locals w/o arguments]
// [outgoing arguments]
// [ENTRY_FRAME_LOCALS]
//
// PARENT_IJAVA_FRAME_ABI:
// 0 [ABI_48]
// top_frame_sp
// initial_caller_sp
//
// TOP_IJAVA_FRAME_ABI:
// 0 [PARENT_IJAVA_FRAME_ABI]
// carg_3_unused
// carg_4_unused
// carg_5_unused
// carg_6_unused
// carg_7_unused
// frame_manager_lr
//
// PARENT_IJAVA_FRAME_ABI
struct parent_ijava_frame_abi : abi_48 {
// SOE registers.
// C2i adapters spill their top-frame stack-pointer here.
uint64_t top_frame_sp; // carg_1
// Sp of calling compiled frame before it was resized by the c2i
// adapter or sp of call stub. Does not contain a valid value for
// non-initial frames.
uint64_t initial_caller_sp; // carg_2
// aligned to frame::alignment_in_bytes (16)
};
enum {
parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
};
#define _parent_ijava_frame_abi(_component) \
(offset_of(frame::parent_ijava_frame_abi, _component))
// TOP_IJAVA_FRAME_ABI
struct top_ijava_frame_abi : parent_ijava_frame_abi {
uint64_t carg_3_unused; // carg_3
uint64_t carg_4_unused; //_16 carg_4
uint64_t carg_5_unused; // carg_5
uint64_t carg_6_unused; //_16 carg_6
uint64_t carg_7_unused; // carg_7
// Use arg8 for storing frame_manager_lr. The size of
// top_ijava_frame_abi must match abi_112.
uint64_t frame_manager_lr; //_16 carg_8
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
};
enum {
top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
};
#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))
// ENTRY_FRAME
struct entry_frame_locals {
uint64_t call_wrapper_address;
uint64_t result_address; //_16
uint64_t result_type;
uint64_t arguments_tos_address; //_16
// aligned to frame::alignment_in_bytes (16)
uint64_t r[spill_nonvolatiles_size/sizeof(uint64_t)];
};
enum {
entry_frame_locals_size = sizeof(entry_frame_locals)
};
#define _entry_frame_locals_neg(_component) \
(int)(-frame::entry_frame_locals_size + offset_of(frame::entry_frame_locals, _component))
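// Worked example (sketch): entry_frame_locals holds four 8-byte slots plus the
// 288-byte spill_nonvolatiles area, so entry_frame_locals_size is 320 bytes and
// _entry_frame_locals_neg(result_address) evaluates to -320 + 8 = -312.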
// Frame layout for JIT generated methods
//
// In these figures the stack grows upwards, while memory grows
// downwards. Square brackets denote regions possibly larger than single
// 64 bit slots.
//
// STACK (interpreted Java calls JIT generated Java):
// [JIT_FRAME] <-- SP (mod 16 = 0)
// [TOP_IJAVA_FRAME]
// ...
//
// JIT_FRAME (is a C frame according to PPC-64 ABI):
// [out_preserve]
// [out_args]
// [spills]
// [pad_1]
// [monitor] (optional)
// ...
// [monitor] (optional)
// [pad_2]
// [in_preserve] added / removed by prolog / epilog
//
// JIT_ABI (TOP and PARENT)
struct jit_abi {
uint64_t callers_sp;
uint64_t cr;
uint64_t lr;
uint64_t toc;
// Nothing to add here!
// NOT ALIGNED to frame::alignment_in_bytes (16).
};
struct jit_out_preserve : jit_abi {
// Nothing to add here!
};
struct jit_in_preserve {
// Nothing to add here!
};
enum {
jit_out_preserve_size = sizeof(jit_out_preserve),
jit_in_preserve_size = sizeof(jit_in_preserve)
};
struct jit_monitor {
uint64_t monitor[1];
};
enum {
jit_monitor_size = sizeof(jit_monitor),
};
private:
// STACK:
// ...
// [THIS_FRAME] <-- this._sp (stack pointer for this frame)
// [CALLER_FRAME] <-- this.fp() (_sp of caller's frame)
// ...
//
// frame pointer for this frame
intptr_t* _fp;
// The frame's stack pointer before it has been extended by a c2i adapter;
// needed by deoptimization
intptr_t* _unextended_sp;
void adjust_unextended_sp();
public:
// Accessors for fields
intptr_t* fp() const { return _fp; }
// Accessors for ABIs
inline abi_48* own_abi() const { return (abi_48*) _sp; }
inline abi_48* callers_abi() const { return (abi_48*) _fp; }
private:
// Find codeblob and set deopt_state.
inline void find_codeblob_and_set_pc_and_deopt_state(address pc);
public:
// Constructors
inline frame(intptr_t* sp);
frame(intptr_t* sp, address pc);
inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
private:
intptr_t* compiled_sender_sp(CodeBlob* cb) const;
address* compiled_sender_pc_addr(CodeBlob* cb) const;
address* sender_pc_addr(void) const;
public:
#ifdef CC_INTERP
// Additional interface for interpreter frames:
inline interpreterState get_interpreterState() const;
#endif // CC_INTERP
// Size of a monitor in bytes.
static int interpreter_frame_monitor_size_in_bytes();
// The size of a cInterpreter object.
static inline int interpreter_frame_cinterpreterstate_size_in_bytes();
private:
ConstantPoolCache** interpreter_frame_cpoolcache_addr() const;
public:
// Additional interface for entry frames:
inline entry_frame_locals* get_entry_frame_locals() const {
return (entry_frame_locals*) (((address) fp()) - entry_frame_locals_size);
}
enum {
// normal return address is 1 bundle past PC
pc_return_offset = 0
};
#endif // CPU_PPC_VM_FRAME_PPC_HPP

View File

@@ -0,0 +1,236 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
// Inline functions for ppc64 frames:
// Find codeblob and set deopt_state.
inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
assert(pc != NULL, "precondition: must have PC");
_cb = CodeCache::find_blob(pc);
_pc = pc; // Must be set for get_deopt_original_pc()
_fp = (intptr_t*)own_abi()->callers_sp;
// adjust_unextended_sp() uses _fp - frame_size; it must run after _cb and _pc
// are initialized and before get_deopt_original_pc() is called.
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
assert(((uint64_t)_sp & 0xf) == 0, "SP must be 16-byte aligned");
}
// Constructors
// Initialize all fields; _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->lr); // also sets _fp and adjusts _unextended_sp
}
inline frame::frame(intptr_t* sp, address pc) : _sp(sp), _unextended_sp(sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
}
inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp) : _sp(sp), _unextended_sp(unextended_sp) {
find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
}
// Accessors
// Return a unique id for this frame. The id must allow distinguishing
// both frame identity and the younger/older relationship. NULL
// represents an invalid (incomparable) frame.
inline intptr_t* frame::id(void) const {
// Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
return _fp;
}
// Return true if this frame is older (less recent activation) than
// the frame represented by id.
inline bool frame::is_older(intptr_t* id) const {
assert(this->id() != NULL && id != NULL, "NULL frame id");
// Stack grows towards smaller addresses on ppc64.
return this->id() > id;
}
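// Example (sketch with assumed addresses): a frame whose id (fp) is
// 0x7fff00001000 is older than one whose id is 0x7fff00000800, because the
// older activation lives at the numerically higher address.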
inline int frame::frame_size(RegisterMap* map) const {
// Stack grows towards smaller addresses on PPC64: sender is at a higher address.
return sender_sp() - sp();
}
// Return the frame's stack pointer before it has been extended by a
// c2i adapter. This is needed by deoptimization for ignoring c2i adapter
// frames.
inline intptr_t* frame::unextended_sp() const {
return _unextended_sp;
}
// All frames have this field.
inline address frame::sender_pc() const {
return (address)callers_abi()->lr;
}
inline address* frame::sender_pc_addr() const {
return (address*)&(callers_abi()->lr);
}
// All frames have this field.
inline intptr_t* frame::sender_sp() const {
return (intptr_t*)callers_abi();
}
// All frames have this field.
inline intptr_t* frame::link() const {
return (intptr_t*)callers_abi()->callers_sp;
}
inline intptr_t* frame::real_fp() const {
return fp();
}
#ifdef CC_INTERP
inline interpreterState frame::get_interpreterState() const {
return (interpreterState)(((address)callers_abi())
- frame::interpreter_frame_cinterpreterstate_size_in_bytes());
}
inline intptr_t** frame::interpreter_frame_locals_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t**)&istate->_locals;
}
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t*)&istate->_bcp;
}
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
interpreterState istate = get_interpreterState();
return (intptr_t*)&istate->_mdx;
}
inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
inline jint frame::interpreter_frame_expression_stack_direction() {
return -1;
}
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
interpreterState istate = get_interpreterState();
return istate->_stack + 1;
}
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}
// monitor elements
// In keeping with the Intel side: end is lower in memory than begin,
// the beginning element is the oldest element, and begin is one past
// the last monitor.
inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
return get_interpreterState()->monitor_base();
}
inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
return (BasicObjectLock*)get_interpreterState()->stack_base();
}
inline int frame::interpreter_frame_cinterpreterstate_size_in_bytes() {
// Size of an interpreter object. Not aligned with frame size.
return round_to(sizeof(BytecodeInterpreter), 8);
}
inline Method** frame::interpreter_frame_method_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_method;
}
// Constant pool cache
inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants; // should really use accessor
}
inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
interpreterState istate = get_interpreterState();
return &istate->_constants;
}
#endif // CC_INTERP
inline int frame::interpreter_frame_monitor_size() {
// Number of stack slots for a monitor.
return round_to(BasicObjectLock::size(), // number of stack slots
WordsPerLong); // number of stack slots for a Java long
}
inline int frame::interpreter_frame_monitor_size_in_bytes() {
return frame::interpreter_frame_monitor_size() * wordSize;
}
// entry frames
inline intptr_t* frame::entry_frame_argument_at(int offset) const {
// Since an entry frame always calls the interpreter first, the
// parameters are on the stack, relative to a known register in the
// entry frame.
intptr_t* tos = (intptr_t*)get_entry_frame_locals()->arguments_tos_address;
return &tos[offset + 1]; // prepushed tos
}
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
return (JavaCallWrapper**)&get_entry_frame_locals()->call_wrapper_address;
}
inline oop frame::saved_oop_result(RegisterMap* map) const {
return *((oop*)map->location(R3->as_VMReg()));
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
*((oop*)map->location(R3->as_VMReg())) = obj;
}
#endif // CPU_PPC_VM_FRAME_PPC_INLINE_HPP

View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
#define CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
// Size of PPC Instructions
const int BytesPerInstWord = 4;
const int StackAlignmentInBytes = 16;
// Indicates whether the C calling conventions require that
// 32-bit integer argument values are properly extended to 64 bits.
// If set, SharedRuntime::c_calling_convention() must adapt
// signatures accordingly.
const bool CCallingConventionRequiresIntsAsLongs = true;
// The PPC CPUs are NOT multiple-copy-atomic.
#define CPU_NOT_MULTIPLE_COPY_ATOMIC
#endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP

View File

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_GLOBALS_PPC_HPP
#define CPU_PPC_VM_GLOBALS_PPC_HPP
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, false); // Improves performance markedly for mtrt and compress.
define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast.
// Use large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 128);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1500);
define_pd_global(intx, PreInflateSpin, 10);
// Flags for template interpreter.
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
define_pd_global(uintx, TypeProfileLevel, 0);
// Platform dependent flag handling: flags only defined on this platform.
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
\
/* Load poll address from thread. This is used to implement per-thread */ \
/* safepoints on platforms != IA64. */ \
product(bool, LoadPollAddressFromThread, false, \
"Load polling page address from thread object (required for " \
"per-thread safepoints on platforms != IA64)") \
\
product(uintx, PowerArchitecturePPC64, 0, \
"CPU Version: x for PowerX. Currently recognizes Power5 to " \
"Power7. Default is 0. CPUs newer than Power7 will be " \
"recognized as Power7.") \
\
/* Reoptimize code-sequences of calls at runtime, e.g. replace an */ \
/* indirect call by a direct call. */ \
product(bool, ReoptimizeCallSequences, true, \
"Reoptimize code-sequences of calls at runtime.") \
\
product(bool, UseLoadInstructionsForStackBangingPPC64, false, \
"Use load instructions for stack banging.") \
\
/* special instructions */ \
\
product(bool, UseCountLeadingZerosInstructionsPPC64, true, \
"Use count leading zeros instructions.") \
\
product(bool, UseExtendedLoadAndReserveInstructionsPPC64, false, \
"Use extended versions of load-and-reserve instructions.") \
\
product(bool, UseRotateAndMaskInstructionsPPC64, true, \
"Use rotate and mask instructions.") \
\
product(bool, UseStaticBranchPredictionInCompareAndSwapPPC64, true, \
"Use static branch prediction hints in CAS operations.") \
product(bool, UseStaticBranchPredictionForUncommonPathsPPC64, false, \
"Use static branch prediction hints for uncommon paths.") \
\
product(bool, UsePower6SchedulerPPC64, false, \
"Use Power6 Scheduler.") \
\
product(bool, InsertEndGroupPPC64, false, \
"Insert EndGroup instructions to optimize for Power6.") \
\
/* Trap based checks. */ \
/* Trap based checks use the ppc trap instructions to check certain */ \
/* conditions. This instruction raises a SIGTRAP caught by the */ \
/* exception handler of the VM. */ \
product(bool, UseSIGTRAP, true, \
"Allow trap instructions that make use of SIGTRAP. Use this to " \
"switch off all optimizations requiring SIGTRAP.") \
product(bool, TrapBasedICMissChecks, true, \
"Raise and handle SIGTRAP if inline cache miss detected.") \
product(bool, TrapBasedNotEntrantChecks, true, \
"Raise and handle SIGTRAP if calling not entrant or zombie" \
" method.") \
product(bool, TraceTraps, false, "Trace all traps the signal handler "  \
"handles.") \
\
product(bool, ZapMemory, false, "Write 0x0101... to empty memory."      \
" Use this to ease debugging.")
#endif // CPU_PPC_VM_GLOBALS_PPC_HPP

View File

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_ppc.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#define __ masm.
int InlineCacheBuffer::ic_stub_code_size() {
return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler masm(&code);
// Note: even though the code contains embedded metadata, we do not need reloc info
// because
// (1) the metadata is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
// Load the oop ...
__ load_const(R19_method, (address) cached_value, R0);
// ... and jump to entry point.
__ b64_patchable((address) entry_point, relocInfo::none);
__ flush();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
NativeJump* jump = nativeJump_at(move->next_instruction_address());
return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
void* o = (void*)move->data();
return o;
}

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "assembler_ppc.inline.hpp"
#include "runtime/icache.hpp"
// Use inline assembler to implement icache flush.
int ICache::ppc64_flush_icache(address start, int lines, int magic) {
address end = start + (unsigned int)lines*ICache::line_size;
assert(start <= end, "flush_icache parms");
// store modified cache lines from data cache
for (address a = start; a < end; a += ICache::line_size) {
__asm__ __volatile__(
"dcbst 0, %0 \n"
:
: "r" (a)
: "memory");
}
// sync instruction
__asm__ __volatile__(
"sync \n"
:
:
: "memory");
// invalidate respective cache lines in instruction cache
for (address a = start; a < end; a += ICache::line_size) {
__asm__ __volatile__(
"icbi 0, %0 \n"
:
: "r" (a)
: "memory");
}
// discard fetched instructions
__asm__ __volatile__(
"isync \n"
:
:
: "memory");
return magic;
}
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
StubCodeMark mark(this, "ICache", "flush_icache_stub");
*flush_icache_stub = (ICache::flush_icache_stub_t)ICache::ppc64_flush_icache;
// Call the stub once so that the stub code itself gets flushed.
ICache::invalidate_range((address)(*flush_icache_stub), 0);
}
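// A minimal usage sketch (hypothetical call site; 'patched_begin' is an
// assumed name): after the VM patches instructions in place, the modified
// range must be flushed before it is executed:
//
//   address patched_begin = ...; // first patched instruction
//   ICache::invalidate_range(patched_begin, 3 * BytesPerInstWord);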

View File

@@ -0,0 +1,52 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_ICACHE_PPC_HPP
#define CPU_PPC_VM_ICACHE_PPC_HPP
// Interface for updating the instruction cache. Whenever the VM modifies
// code, part of the processor instruction cache potentially has to be flushed.
class ICache : public AbstractICache {
friend class ICacheStubGenerator;
static int ppc64_flush_icache(address start, int lines, int magic);
public:
enum {
// Actually, cache line size is 64, but keeping it as it is to be
// on the safe side on ALL PPC64 implementations.
log2_line_size = 5,
line_size = 1 << log2_line_size
};
static void ppc64_flush_icache_bytes(address start, int bytes) {
// Align start address to an icache line boundary and transform
// nbytes to an icache line count.
const uint line_offset = mask_address_bits(start, line_size - 1);
ppc64_flush_icache(start - line_offset, (bytes + line_offset + line_size - 1) >> log2_line_size, 0);
}
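// Worked example (sketch): with line_size = 32, flushing 70 bytes that start
// 5 bytes into a cache line gives line_offset = 5 and a line count of
// (70 + 5 + 31) >> 5 = 3, which covers the whole span from the aligned base.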
};
#endif // CPU_PPC_VM_ICACHE_PPC_HPP

View File

@@ -0,0 +1,510 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interpreter/interpreterRuntime.hpp"
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
#ifdef CC_INTERP
address exception_entry = StubRoutines::throw_NullPointerException_at_call_entry();
#else
address exception_entry = Interpreter::throw_NullPointerException_entry();
#endif
MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}
// Lock object
//
// Registers alive
// monitor - Address of the BasicObjectLock to be used for locking,
// which must be initialized with the object to lock.
// object - Address of the object to be locked.
//
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
if (UseHeavyMonitors) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
} else {
// template code:
//
// markOop displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(NULL);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
// }
const Register displaced_header = R7_ARG5;
const Register object_mark_addr = R8_ARG6;
const Register current_header = R9_ARG7;
const Register tmp = R10_ARG8;
Label done;
Label cas_failed, slow_case;
assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
// markOop displaced_header = obj->mark().set_unlocked();
// Load markOop from object into displaced_header.
ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
if (UseBiasedLocking) {
biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
}
// Set displaced_header to be (markOop of object | UNLOCK_VALUE).
ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
// monitor->lock()->set_displaced_header(displaced_header);
// Initialize the box (Must happen before we update the object mark!).
std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
// CmpxchgX sets CCR0 to cmpX(current, displaced).
fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/current_header,
/*compare_value=*/displaced_header, /*exchange_value=*/monitor,
/*where=*/object_mark_addr,
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
MacroAssembler::cmpxchgx_hint_acquire_lock(),
noreg,
&cas_failed);
// If the compare-and-exchange succeeded, then we found an unlocked
// object and we have now locked it.
b(done);
bind(cas_failed);
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
// monitor->lock()->set_displaced_header(NULL);
// We did not see an unlocked object so try the fast recursive case.
// Check if owner is self by comparing the value in the markOop of object
// (current_header) with the stack pointer.
sub(current_header, current_header, R1_SP);
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
load_const_optimized(tmp,
(address) (~(os::vm_page_size()-1) |
markOopDesc::lock_mask_in_place));
and_(R0/*==0?*/, current_header, tmp);
// If condition is true we are done and hence we can store 0 in the displaced
// header indicating it is a recursive lock.
bne(CCR0, slow_case);
release();
std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
b(done);
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
// None of the above fast optimizations worked so we have to get into the
// slow case of monitor enter.
bind(slow_case);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
// }
bind(done);
}
}
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Registers alive
// monitor - Address of the BasicObjectLock to be unlocked,
// which must be initialized with the locked object.
//
// Throw IllegalMonitorException if object is not locked by current thread.
void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_exceptions) {
if (UseHeavyMonitors) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
monitor, /*check_for_exceptions=*/false);
} else {
// template code:
//
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(THREAD, monitor);
// }
const Register object = R7_ARG5;
const Register displaced_header = R8_ARG6;
const Register object_mark_addr = R9_ARG7;
const Register current_header = R10_ARG8;
Label free_slot;
Label slow_case;
assert_different_registers(object, displaced_header, object_mark_addr, current_header);
if (UseBiasedLocking) {
// The object address from the monitor is in object.
ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
biased_locking_exit(CCR0, object, displaced_header, free_slot);
}
// Test first if we are in the fast recursive case.
ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
// If the displaced header is zero, we have a recursive unlock.
cmpdi(CCR0, displaced_header, 0);
beq(CCR0, free_slot); // recursive unlock
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// If we still have a lightweight lock, unlock the object and be done.
// The object address from the monitor is in object.
if (!UseBiasedLocking) ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// We have the displaced header in displaced_header. If the lock is still
// lightweight, it will contain the monitor address and we'll store the
// displaced header back into the object's mark word.
// CmpxchgX sets CCR0 to cmpX(current, monitor).
cmpxchgd(/*flag=*/CCR0,
/*current_value=*/current_header,
/*compare_value=*/monitor, /*exchange_value=*/displaced_header,
/*where=*/object_mark_addr,
MacroAssembler::MemBarRel,
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&slow_case);
b(free_slot);
// } else {
// // Slow path.
// InterpreterRuntime::monitorexit(THREAD, monitor);
// The lock has been converted into a heavy lock and hence
// we need to get into the slow case.
bind(slow_case);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
// }
Label done;
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
// Exchange worked, do monitor->set_obj(NULL);
align(32, 12);
bind(free_slot);
li(R0, 0);
std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
bind(done);
}
}
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
Label& skip) {
BLOCK_COMMENT("Load and ev. allocate counter object {");
Label has_counters;
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
cmpdi(CCR0, Rcounters, 0);
bne(CCR0, has_counters);
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), method, false);
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
cmpdi(CCR0, Rcounters, 0);
beq(CCR0, skip); // No MethodCounters, OutOfMemory.
BLOCK_COMMENT("} Load and ev. allocate counter object");
bind(has_counters);
}
void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) {
assert(UseCompiler, "incrementing must be useful");
Register invocation_count = iv_be_count;
Register backedge_count = Rtmp_r0;
int delta = InvocationCounter::count_increment;
// Load each counter in a register.
// ld(inv_counter, Rtmp);
// ld(be_counter, Rtmp2);
int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
int be_counter_offset = in_bytes(MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
BLOCK_COMMENT("Increment profiling counters {");
// Load the backedge counter.
lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
// Mask the backedge counter.
Register tmp = invocation_count;
li(tmp, InvocationCounter::count_mask_value);
andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.
// Load the invocation counter.
lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
// Add the delta to the invocation counter and store the result.
addi(invocation_count, invocation_count, delta);
// Store value.
stw(invocation_count, inv_counter_offset, Rcounters);
// Add invocation counter + backedge counter.
add(iv_be_count, backedge_count, invocation_count);
// Note that this macro must leave the backedge_count + invocation_count in
// register iv_be_count!
BLOCK_COMMENT("} Increment profiling counters");
}
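// ---------------------------------------------------------------------------
// Illustrative sketch of the counter arithmetic: the low bits of each 32-bit
// counter are status bits, so the increment is a shifted delta and comparisons
// mask the status bits first. The 3-bit split below is an assumption for the
// example, not a statement about this port's InvocationCounter layout.
#include <cstdint>

constexpr int      kStatusBits     = 3;
constexpr uint32_t kCountIncrement = 1u << kStatusBits;      // "delta"
constexpr uint32_t kCountMask      = ~(kCountIncrement - 1); // "count_mask_value"

// Returns the combined invocation + backedge count (iv_be_count above).
uint32_t profile_tick(uint32_t& invocation_counter, uint32_t backedge_counter) {
  uint32_t be = backedge_counter & kCountMask;  // mask status bits
  invocation_counter += kCountIncrement;        // bump and store
  return be + invocation_counter;
}
// ---------------------------------------------------------------------------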
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
if (state == atos) { MacroAssembler::verify_oop(reg); }
}
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
// InterpreterRuntime::post_method_entry();
// }
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
// *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2) ) {
// SharedRuntime::jvmpi_method_entry(method, receiver);
// }
void InterpreterMacroAssembler::notify_method_entry() {
// JVMTI
// Whenever JVMTI puts a thread in interp_only_mode, method
// entry/exit events are sent for that thread to track stack
// depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) {
Label jvmti_post_done;
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
cmpwi(CCR0, R0, 0);
beq(CCR0, jvmti_post_done);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
/*check_exceptions=*/false);
bind(jvmti_post_done);
}
}
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
// // save result
// InterpreterRuntime::post_method_exit();
// // restore result
// }
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
// // save result
// SharedRuntime::jvmpi_method_exit();
// // restore result
// }
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.
void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state) {
// JVMTI
// Whenever JVMTI puts a thread in interp_only_mode, method
// entry/exit events are sent for that thread to track stack
// depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
if (JvmtiExport::can_post_interpreter_events()) {
Label jvmti_post_done;
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
cmpwi(CCR0, R0, 0);
beq(CCR0, jvmti_post_done);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
/*check_exceptions=*/false);
align(32, 12);
bind(jvmti_post_done);
}
}
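// ---------------------------------------------------------------------------
// Illustrative sketch of the JVMTI gate used by both notifiers: a per-thread
// interp_only_mode count is nonzero only while an agent asked for entry/exit
// events, so the common path is one load and one branch. Names are stand-ins.
struct ThreadState { int interp_only_mode; };

static void post_method_entry(ThreadState*) { /* runtime call placeholder */ }

inline void notify_entry(ThreadState* t) {
  if (t->interp_only_mode != 0) post_method_entry(t);
}
// ---------------------------------------------------------------------------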
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
// (using parent_frame_resize) and push a new interpreter
// TOP_IJAVA_FRAME (using frame_size).
void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register pc) {
assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4);
ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
mr(tmp2/*top_frame_sp*/, R1_SP);
// Move initial_caller_sp.
ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
neg(parent_frame_resize, parent_frame_resize);
resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3);
// Set LR in new parent frame.
std(tmp1, _abi(lr), R1_SP);
// Set top_frame_sp info for new parent frame.
std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP);
std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
// Push new TOP_IJAVA_FRAME.
push_frame(top_frame_size, tmp2);
get_PC_trash_LR(tmp3);
std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
// Used for non-initial callers by unextended_sp().
std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
}
// Pop the topmost TOP_IJAVA_FRAME and convert the previous
// PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
assert_different_registers(tmp1, tmp2, tmp3, tmp4);
ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP);
ld(tmp3, _abi(lr), tmp1);
ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1);
ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1);
// Merge top frame.
std(tmp2, _abi(callers_sp), R1_SP);
ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1);
// Update C stack pointer to caller's top_abi.
resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
// Update LR in top_frame.
std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
// Store the top-frame stack-pointer for c2i adapters.
std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
}
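// ---------------------------------------------------------------------------
// Illustrative sketch of the layout both routines maintain: every PPC64 frame
// begins with a back chain (pointer to the caller's frame) plus save slots.
// Field names are invented; the real ABI structs live elsewhere in the port.
#include <cstdint>

struct AbiFrame {
  AbiFrame* callers_sp;   // back chain, kept intact across push/pop
  uint64_t  lr_save;      // saved return address
  // ... port-specific slots (top_frame_sp, initial_caller_sp, ...) ...
};

// Walking the chain, as a stack walker conceptually does:
int frame_depth(AbiFrame* sp) {
  int depth = 0;
  while (sp != nullptr) { ++depth; sp = sp->callers_sp; }
  return depth;
}
// ---------------------------------------------------------------------------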
#ifdef CC_INTERP
// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);
if (state == R14_state) {
ld(tmp1/*state's fp*/, state_(_last_Java_fp));
ld(tmp2/*state's sp*/, state_(_last_Java_sp));
} else if (state == R15_prev_state) {
ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp));
ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp));
} else {
ShouldNotReachHere();
}
// Merge top frames.
std(tmp1, _abi(callers_sp), R1_SP);
// Tmp2 is new SP.
// Tmp1 is parent's SP.
resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
// Update LR in top_frame.
// Must be interpreter frame.
get_PC_trash_LR(tmp3);
std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
// Used for non-initial callers by unextended_sp().
std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
}
#endif // CC_INTERP
// Set SP to initial caller's sp, but before fix the back chain.
void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP);
std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ...
mr(R1_SP, tmp1); // ... and resize to initial caller.
}
#ifdef CC_INTERP
// Pop the current interpreter state (without popping the corresponding
// frame) and restore R14_state and R15_prev_state accordingly.
// Use prev_state_may_be_0 to indicate whether prev_state may be 0
// in order to generate an extra check before retrieving prev_state_(_prev_link).
void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0)
{
// Move prev_state to state and restore prev_state from state_(_prev_link).
Label prev_state_is_0;
mr(R14_state, R15_prev_state);
// Don't retrieve /*state==*/prev_state_(_prev_link)
// if /*state==*/prev_state is 0.
if (prev_state_may_be_0) {
cmpdi(CCR0, R15_prev_state, 0);
beq(CCR0, prev_state_is_0);
}
ld(R15_prev_state, /*state==*/prev_state_(_prev_link));
bind(prev_state_is_0);
}
void InterpreterMacroAssembler::restore_prev_state() {
// _prev_link is private, but cInterpreter is a friend.
ld(R15_prev_state, state_(_prev_link));
}
#endif // CC_INTERP
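// ---------------------------------------------------------------------------
// Illustrative sketch: the interpreter states form a singly linked list
// through _prev_link, so popping moves prev_state into state and follows the
// link, with an extra null check only when the caller says prev_state may be 0.
struct InterpState { InterpState* _prev_link; };  // invented stand-in

void pop_state(InterpState*& state, InterpState*& prev_state,
               bool prev_state_may_be_0) {
  state = prev_state;
  if (prev_state_may_be_0 && prev_state == nullptr) return;
  prev_state = prev_state->_prev_link;
}
// ---------------------------------------------------------------------------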

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
#define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
#include "assembler_ppc.inline.hpp"
#include "interpreter/invocationCounter.hpp"
// This file specializes the assembler with interpreter-specific macros
class InterpreterMacroAssembler: public MacroAssembler {
public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
void null_check_throw(Register a, int offset, Register temp_reg);
// Handy address generation macros
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
#ifdef CC_INTERP
#define state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
#define prev_state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
#endif
void get_method_counters(Register method, Register Rcounters, Label& skip);
void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
// Object locking
void lock_object (Register lock_reg, Register obj_reg);
void unlock_object(Register lock_reg, bool check_for_exceptions = true);
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
// support for jvmdi/jvmpi
void notify_method_entry();
void notify_method_exit(bool is_native_method, TosState state);
#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
// (using parent_frame_resize) and push a new interpreter
// TOP_IJAVA_FRAME (using frame_size).
void push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register pc=noreg);
// Pop the topmost TOP_IJAVA_FRAME and convert the previous
// PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
void pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
void pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3);
// Set SP to initial caller's sp, but before fix the back chain.
void resize_frame_to_initial_caller(Register tmp1, Register tmp2);
// Pop the current interpreter state (without popping the
// corresponding frame) and restore R14_state and R15_prev_state
// accordingly. Use prev_state_may_be_0 to indicate whether
// prev_state may be 0 in order to generate an extra check before
// retrieving prev_state_(_prev_link).
void pop_interpreter_state(bool prev_state_may_be_0);
void restore_prev_state();
#endif
};
#endif // CPU_PPC_VM_INTERP_MASM_PPC_64_HPP

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
friend class AbstractInterpreterGenerator;
private:
address generate_abstract_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP

View File

@ -0,0 +1,150 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
// Access macros for Java and C arguments.
// The first Java argument is at index -1.
#define locals_j_arg_at(index) (Interpreter::local_offset_in_bytes(index)), R18_locals
// The first C argument is at index 0.
#define sp_c_arg_at(index) ((index)*wordSize + _abi(carg_1)), R1_SP
// Implementation of SignatureHandlerGenerator
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
Argument jni_arg(jni_offset());
Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
__ lwa(r, locals_j_arg_at(offset())); // sign extension of integer
if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
__ std(r, sp_c_arg_at(jni_arg.number()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
Argument jni_arg(jni_offset());
Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
__ ld(r, locals_j_arg_at(offset()+1)); // long resides in upper slot
if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
__ std(r, sp_c_arg_at(jni_arg.number()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
: F0;
__ lfs(fp_reg, locals_j_arg_at(offset()));
if (DEBUG_ONLY(true ||) jni_offset() > 8) {
__ stfs(fp_reg, sp_c_arg_at(jni_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
: F0;
__ lfd(fp_reg, locals_j_arg_at(offset()+1));
if (DEBUG_ONLY(true ||) jni_offset() > 8) {
__ stfd(fp_reg, sp_c_arg_at(jni_offset()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
Argument jni_arg(jni_offset());
Register r = jni_arg.is_register() ? jni_arg.as_register() : R11_scratch1;
// The handle for a receiver will never be null.
bool do_NULL_check = offset() != 0 || is_static();
Label do_null;
if (do_NULL_check) {
__ ld(R0, locals_j_arg_at(offset()));
__ cmpdi(CCR0, R0, 0);
__ li(r, 0);
__ beq(CCR0, do_null);
}
__ addir(r, locals_j_arg_at(offset()));
__ bind(do_null);
if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
__ std(r, sp_c_arg_at(jni_arg.number()));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
// Emit fd for current codebuffer. Needs patching!
__ emit_fd();
// Generate code to handle arguments.
iterate(fingerprint);
// Return the result handler.
__ load_const(R3_RET, AbstractInterpreter::result_handler(method()->result_type()));
__ blr();
__ flush();
}
#undef __
// Implementation of SignatureHandlerLibrary
void SignatureHandlerLibrary::pd_set_handler(address handler) {
// patch fd here.
FunctionDescriptor* fd = (FunctionDescriptor*) handler;
fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
}
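// ---------------------------------------------------------------------------
// Illustrative sketch of what is being patched: on PPC64 (ELFv1 ABI) a
// function "address" is really a descriptor, and the generated handler starts
// with one whose entry field must point just past the descriptor itself.
// The layout below is the conventional ELFv1 one, shown for orientation.
#include <cstdint>

struct FnDescriptor {
  void* entry;   // address of the first instruction
  void* toc;     // TOC base (r2) for the callee
  void* env;     // environment pointer, unused by C/C++
};

inline void patch_descriptor(void* handler) {
  FnDescriptor* fd = reinterpret_cast<FnDescriptor*>(handler);
  fd->entry = static_cast<char*>(handler) + sizeof(FnDescriptor);
}
// ---------------------------------------------------------------------------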
// Access function to get the signature.
IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
methodHandle m(thread, method);
assert(m->is_native(), "sanity check");
Symbol *s = m->signature();
return (address) s->base();
IRT_END
IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
methodHandle m(thread, method);
assert(m->is_native(), "sanity check");
return AbstractInterpreter::result_handler(m->result_type());
IRT_END

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_INTERPRETERRT_PPC_HPP
#define CPU_PPC_VM_INTERPRETERRT_PPC_HPP
#include "memory/allocation.hpp"
// native method calls
class SignatureHandlerGenerator: public NativeSignatureIterator {
private:
MacroAssembler* _masm;
// number of already used floating-point argument registers
int _num_used_fp_arg_regs;
void pass_int();
void pass_long();
void pass_double();
void pass_float();
void pass_object();
public:
// Creation
SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
_masm = new MacroAssembler(buffer);
_num_used_fp_arg_regs = 0;
}
// Code generation
void generate(uint64_t fingerprint);
};
// Support for generate_slow_signature_handler.
static address get_result_handler(JavaThread* thread, Method* method);
// A function to get the signature.
static address get_signature(JavaThread* thread, Method* method);
#endif // CPU_PPC_VM_INTERPRETERRT_PPC_HPP

View File

@ -0,0 +1,801 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC"
#endif
#define __ _masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
int AbstractInterpreter::BasicType_as_index(BasicType type) {
int i = 0;
switch (type) {
case T_BOOLEAN: i = 0; break;
case T_CHAR : i = 1; break;
case T_BYTE : i = 2; break;
case T_SHORT : i = 3; break;
case T_INT : i = 4; break;
case T_LONG : i = 5; break;
case T_VOID : i = 6; break;
case T_FLOAT : i = 7; break;
case T_DOUBLE : i = 8; break;
case T_OBJECT : i = 9; break;
case T_ARRAY : i = 9; break;
default : ShouldNotReachHere();
}
assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
return i;
}
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Slow_signature handler that respects the PPC C calling conventions.
//
// We get called by the native entry code with our output register
// area == 8. First we call InterpreterRuntime::get_result_handler
// to copy the pointer to the signature string temporarily to the
// first C-argument and to return the result_handler in
// R3_RET. Since native_entry will copy the jni-pointer to the
// first C-argument slot later on, it is OK to occupy this slot
// temporarily. Then we copy the argument list on the java
// expression stack into native varargs format on the native stack
// and load arguments into argument registers. Integer arguments in
// the varargs vector will be sign-extended to 8 bytes.
//
// On entry:
// R3_ARG1 - intptr_t* Address of java argument list in memory.
// R15_prev_state - BytecodeInterpreter* Address of interpreter state for
// this method
// R19_method
//
// On exit (just before return instruction):
// R3_RET - contains the address of the result_handler.
// R4_ARG2 - is not updated for static methods and contains "this" otherwise.
// R5_ARG3-R10_ARG8: - When the (i-2)th Java argument is not of type float or double,
// ARGi contains this argument. Otherwise, ARGi is not updated.
// F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
const int LogSizeOfTwoInstructions = 3;
// FIXME: use Argument:: GL: Argument names different numbers!
const int max_fp_register_arguments = 13;
const int max_int_register_arguments = 6; // first 2 are reserved
const Register arg_java = R21_tmp1;
const Register arg_c = R22_tmp2;
const Register signature = R23_tmp3; // is string
const Register sig_byte = R24_tmp4;
const Register fpcnt = R25_tmp5;
const Register argcnt = R26_tmp6;
const Register intSlot = R27_tmp7;
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;
address entry = __ emit_fd();
__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
__ push_frame_abi112_nonvolatiles(0, R11_scratch1);
__ mr(arg_java, R3_ARG1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
// Signature is in R3_RET. Signature is callee saved.
__ mr(signature, R3_RET);
// Reload method, it may have moved.
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 1");
#endif
// Get the result handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
// Reload method, it may have moved.
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
__ unimplemented("slow signature handler 2");
#endif
{
Label L;
// test if static
// _access_flags._flags must be at offset 0.
// TODO PPC port: requires change in shared code.
//assert(in_bytes(AccessFlags::flags_offset()) == 0,
// "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
// _access_flags must be a 32 bit value.
assert(sizeof(AccessFlags) == 4, "wrong size");
__ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
// testbit with condition register.
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
__ btrue(CCR0, L);
// For non-static functions, pass "this" in R4_ARG2 and copy it
// to 2nd C-arg slot.
// We need to box the Java object here, so we use arg_java
// (address of current Java stack slot) as argument and don't
// dereference it as in case of ints, floats, etc.
__ mr(R4_ARG2, arg_java);
__ addi(arg_java, arg_java, -BytesPerWord);
__ std(R4_ARG2, _abi(carg_2), target_sp);
__ bind(L);
}
// Will be incremented directly after loop_start. argcnt=0
// corresponds to 3rd C argument.
__ li(argcnt, -1);
// arg_c points to 3rd C argument
__ addi(arg_c, target_sp, _abi(carg_3));
// no floating-point args parsed so far
__ li(fpcnt, 0);
Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
Label loop_start, loop_end;
Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
// signature points to '(' at entry
#ifdef ASSERT
__ lbz(sig_byte, 0, signature);
__ cmplwi(CCR0, sig_byte, '(');
__ bne(CCR0, do_dontreachhere);
#endif
__ bind(loop_start);
__ addi(argcnt, argcnt, 1);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
__ beq(CCR0, loop_end);
__ cmplwi(CCR0, sig_byte, 'B'); // byte
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'C'); // char
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'D'); // double
__ beq(CCR0, do_double);
__ cmplwi(CCR0, sig_byte, 'F'); // float
__ beq(CCR0, do_float);
__ cmplwi(CCR0, sig_byte, 'I'); // int
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'J'); // long
__ beq(CCR0, do_long);
__ cmplwi(CCR0, sig_byte, 'S'); // short
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
__ beq(CCR0, do_int);
__ cmplwi(CCR0, sig_byte, 'L'); // object
__ beq(CCR0, do_object);
__ cmplwi(CCR0, sig_byte, '['); // array
__ beq(CCR0, do_array);
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
// __ beq(CCR0, do_void);
__ bind(do_dontreachhere);
__ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
__ bind(do_array);
{
Label start_skip, end_skip;
__ bind(start_skip);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, '[');
__ beq(CCR0, start_skip); // skip further brackets
__ cmplwi(CCR0, sig_byte, '9');
__ bgt(CCR0, end_skip); // no optional size
__ cmplwi(CCR0, sig_byte, '0');
__ bge(CCR0, start_skip); // skip optional size
__ bind(end_skip);
__ cmplwi(CCR0, sig_byte, 'L');
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
__ b(do_boxed); // otherwise, go directly to do_boxed
}
__ bind(do_object);
{
Label L;
__ bind(L);
__ lbzu(sig_byte, 1, signature);
__ cmplwi(CCR0, sig_byte, ';');
__ bne(CCR0, L);
}
// Need to box the Java object here, so we use arg_java (address of
// current Java stack slot) as argument and don't dereference it as
// in case of ints, floats, etc.
Label do_null;
__ bind(do_boxed);
__ ld(R0, 0, arg_java);
__ cmpdi(CCR0, R0, 0);
__ li(intSlot, 0);
__ beq(CCR0, do_null);
__ mr(intSlot, arg_java);
__ bind(do_null);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_int);
__ lwa(intSlot, 0, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_long);
__ ld(intSlot, -BytesPerWord, arg_java);
__ std(intSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
__ blt(CCR0, move_intSlot_to_ARG);
__ b(loop_start);
__ bind(do_float);
__ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
__ stfs(floatSlot, 4, arg_c);
#elif defined(AIX)
__ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
#endif
__ addi(arg_java, arg_java, -BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(do_double);
__ lfd(floatSlot, - BytesPerWord, arg_java);
__ stfd(floatSlot, 0, arg_c);
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
__ addi(arg_c, arg_c, BytesPerWord);
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
__ blt(CCR0, move_floatSlot_to_FARG);
__ b(loop_start);
__ bind(loop_end);
__ pop_frame();
__ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
__ restore_LR_CR(R0);
__ blr();
Label move_int_arg, move_float_arg;
__ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ mr(R5_ARG3, intSlot); __ b(loop_start);
__ mr(R6_ARG4, intSlot); __ b(loop_start);
__ mr(R7_ARG5, intSlot); __ b(loop_start);
__ mr(R8_ARG6, intSlot); __ b(loop_start);
__ mr(R9_ARG7, intSlot); __ b(loop_start);
__ mr(R10_ARG8, intSlot); __ b(loop_start);
__ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
__ fmr(F1_ARG1, floatSlot); __ b(loop_start);
__ fmr(F2_ARG2, floatSlot); __ b(loop_start);
__ fmr(F3_ARG3, floatSlot); __ b(loop_start);
__ fmr(F4_ARG4, floatSlot); __ b(loop_start);
__ fmr(F5_ARG5, floatSlot); __ b(loop_start);
__ fmr(F6_ARG6, floatSlot); __ b(loop_start);
__ fmr(F7_ARG7, floatSlot); __ b(loop_start);
__ fmr(F8_ARG8, floatSlot); __ b(loop_start);
__ fmr(F9_ARG9, floatSlot); __ b(loop_start);
__ fmr(F10_ARG10, floatSlot); __ b(loop_start);
__ fmr(F11_ARG11, floatSlot); __ b(loop_start);
__ fmr(F12_ARG12, floatSlot); __ b(loop_start);
__ fmr(F13_ARG13, floatSlot); __ b(loop_start);
__ bind(move_intSlot_to_ARG);
__ sldi(R0, argcnt, LogSizeOfTwoInstructions);
__ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
__ bind(move_floatSlot_to_FARG);
__ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
__ addi(fpcnt, fpcnt, 1);
__ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
__ add(R11_scratch1, R0, R11_scratch1);
__ mtctr(R11_scratch1/*branch_target*/);
__ bctr();
return entry;
}
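// ---------------------------------------------------------------------------
// Illustrative sketch of the classification the handler performs while
// walking the signature: up to 6 integer/pointer arguments go in GPRs (the
// first two GPR slots being reserved, as noted above) and up to 13 float or
// double arguments in FPRs; everything else lives only in the argument area.
// The parsing below is simplified and the function name is invented.
#include <iostream>
#include <string>

void classify(const std::string& jni_sig) {  // e.g. "(I[JLjava/lang/String;D)V"
  int gp = 0, fp = 0, stack = 0;
  for (std::size_t i = 1; i < jni_sig.size() && jni_sig[i] != ')'; ++i) {
    char c = jni_sig[i];
    if (c == 'F' || c == 'D') { (fp < 13 ? ++fp : ++stack); continue; }
    while (jni_sig[i] == '[') ++i;                    // skip array brackets
    if (jni_sig[i] == 'L') i = jni_sig.find(';', i);  // skip class name
    (gp < 6 ? ++gp : ++stack);                        // references and ints use GPRs
  }
  std::cout << gp << " GPR, " << fp << " FPR, " << stack << " stack-only\n";
}
// ---------------------------------------------------------------------------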
address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
//
// Registers alive
// R3_RET
// LR
//
// Registers updated
// R3_RET
//
Label done;
address entry = __ pc();
switch (type) {
case T_BOOLEAN:
// convert !=0 to 1
__ neg(R0, R3_RET);
__ orr(R0, R3_RET, R0);
__ srwi(R3_RET, R0, 31);
break;
case T_BYTE:
// sign extend 8 bits
__ extsb(R3_RET, R3_RET);
break;
case T_CHAR:
// zero extend 16 bits
__ clrldi(R3_RET, R3_RET, 48);
break;
case T_SHORT:
// sign extend 16 bits
__ extsh(R3_RET, R3_RET);
break;
case T_INT:
// sign extend 32 bits
__ extsw(R3_RET, R3_RET);
break;
case T_LONG:
break;
case T_OBJECT:
// unbox result if not null
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, done);
__ ld(R3_RET, 0, R3_RET);
__ verify_oop(R3_RET);
break;
case T_FLOAT:
break;
case T_DOUBLE:
break;
case T_VOID:
break;
default: ShouldNotReachHere();
}
__ BIND(done);
__ blr();
return entry;
}
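// ---------------------------------------------------------------------------
// Illustrative sketch of the T_BOOLEAN case: the neg/or/srwi sequence above
// normalizes any nonzero 32-bit value to exactly 1 without a branch, because
// (-x | x) has its sign bit set iff x != 0.
#include <cassert>
#include <cstdint>

inline uint32_t normalize_bool(uint32_t x) {
  return ((0u - x) | x) >> 31;   // 0 -> 0, anything else -> 1
}

int main() {
  assert(normalize_bool(0) == 0);
  assert(normalize_bool(1) == 1);
  assert(normalize_bool(0x80u) == 1);  // e.g. a byte result with a high bit
  return 0;
}
// ---------------------------------------------------------------------------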
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
address entry = __ pc();
//
// Registers alive
// R16_thread - JavaThread*
// R19_method - callee's methodOop (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
// Stack layout at this point:
//
// 0 [TOP_IJAVA_FRAME_ABI] <-- R1_SP
// alignment (optional)
// [outgoing Java arguments]
// ...
// PARENT [PARENT_IJAVA_FRAME_ABI]
// ...
//
// Can't use call_VM here because we have not set up a new
// interpreter state. Make the call to the vm and make it look like
// our caller set up the JavaFrameAnchor.
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
// Push a new C frame and save LR.
__ save_LR_CR(R0);
__ push_frame_abi112(0, R11_scratch1);
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// Pop the C frame and restore LR.
__ pop_frame();
__ restore_LR_CR(R0);
// Reset JavaFrameAnchor from call_VM_leaf above.
__ reset_last_Java_frame();
#ifdef CC_INTERP
// Return to frame manager, it will handle the pending exception.
__ blr();
#else
Unimplemented();
#endif
return entry;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
return NULL;
Label Lslow_path, Lacquire;
const Register
Rclass_or_obj = R3_ARG1,
Rconst_method = R4_ARG2,
Rcodes = Rconst_method,
Rcpool_cache = R5_ARG3,
Rscratch = R11_scratch1,
Rjvmti_mode = Rscratch,
Roffset = R12_scratch2,
Rflags = R6_ARG4,
Rbtable = R7_ARG5;
static address branch_table[number_of_states];
address entry = __ pc();
// Check for safepoint:
// Ditch this, real men don't need safepoint checks.
// Also check for JVMTI mode
// Check for null obj, take slow path if so.
__ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
__ cmpdi(CCR1, Rclass_or_obj, 0);
__ cmpwi(CCR0, Rjvmti_mode, 0);
__ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
__ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
// Do 2 things in parallel:
// 1. Load the index out of the first instruction word, which looks like this:
// <0x2a><0xb4><index (2 bytes, native endianness)>.
// 2. Load constant pool cache base.
__ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
__ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
__ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
__ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
// Get the const pool entry by means of <index>.
const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
__ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
__ add(Rcpool_cache, Rscratch, Rcpool_cache);
// Check if cpool cache entry is resolved.
// We are resolved if the indices offset contains the current bytecode.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Big Endian:
__ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
__ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
__ bne(CCR0, Lslow_path);
__ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
// Finally, start loading the value: Get cp cache entry into regs.
__ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
__ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
// Following code is from templateTable::getfield_or_static
// Load pointer to branch table
__ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
// Get volatile flag
__ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
// note: sync is needed before volatile load on PPC64
// Check field type
__ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
#ifdef ASSERT
Label LFlagInvalid;
__ cmpldi(CCR0, Rflags, number_of_states);
__ bge(CCR0, LFlagInvalid);
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x543);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
__ ldx(Rbtable, Rbtable, Rflags);
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
__ mtctr(Rbtable);
__ bctr();
#ifdef ASSERT
__ bind(LFlagInvalid);
__ stop("got invalid flag", 0x6541);
bool all_uninitialized = true,
all_initialized = true;
for (int i = 0; i<number_of_states; ++i) {
all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
all_initialized = all_initialized && (branch_table[i] != NULL);
}
assert(all_uninitialized != all_initialized, "consistency"); // either or
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
__ stop("unexpected type", 0x6551);
#endif
if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ sync(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
//__ dcbt(R3_RET); // prefetch
__ beq(CCR6, Lacquire);
__ blr();
}
__ align(32, 12);
__ bind(Lacquire);
__ twi_0(R3_RET);
__ isync(); // acquire
__ blr();
#ifdef ASSERT
for (int i = 0; i<number_of_states; ++i) {
assert(branch_table[i], "accessor_entry initialization");
//tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
}
#endif
__ bind(Lslow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(Rscratch);
__ bctr();
__ flush();
return entry;
}
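// ---------------------------------------------------------------------------
// Illustrative sketch of the dispatch scheme: one entry per result type, with
// the volatile flavor starting exactly one instruction (the sync) before the
// plain flavor. C++ cannot express the fixed one-instruction offset, so this
// models it as a two-column function-pointer table; only the int row is shown.
#include <atomic>
#include <cstdint>

using Entry = std::int64_t (*)(const char* base, std::ptrdiff_t offset);

static std::int64_t load_int_plain(const char* base, std::ptrdiff_t off) {
  return *reinterpret_cast<const std::int32_t*>(base + off);
}
static std::int64_t load_int_volatile(const char* base, std::ptrdiff_t off) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the leading "sync"
  std::int64_t v = load_int_plain(base, off);
  std::atomic_thread_fence(std::memory_order_acquire);  // the "twi; isync" acquire
  return v;
}

static Entry branch_table[][2] = {
  { load_int_plain, load_int_volatile },  // itos; other rows analogous
};

std::int64_t get_field(int row, bool is_volatile,
                       const char* obj, std::ptrdiff_t off) {
  return branch_table[row][is_volatile](obj, off);
}
// ---------------------------------------------------------------------------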
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
// 2. If G1 is active we *must* execute this intrinsic for correctness:
// It contains a GC barrier which puts the reference into the satb buffer
// to indicate that someone holds a strong reference to the object the
// weak ref points to!
address InterpreterGenerator::generate_Reference_get_entry(void) {
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. the "intrinsified" code for G1 (or any SATB based GC),
// 2. the slow path - which is an expansion of the regular method entry.
//
// Notes:
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
address entry = __ pc();
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
if (UseG1GC) {
Label slow_path;
// Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
// In the G1 code we don't check if we need to reach a safepoint. We
// continue and the thread will safepoint at the next bytecode dispatch.
// If the receiver is null then it is OK to jump to the slow path.
__ ld(R3_RET, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp)); // get receiver
// Check if receiver == NULL and go the slow path.
__ cmpdi(CCR0, R3_RET, 0);
__ beq(CCR0, slow_path);
// Load the value of the referent field.
__ load_heap_oop(R3_RET, referent_offset, R3_RET);
// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer. Note with
// these parameters the pre-barrier does not generate
// the load of the previous value.
// Restore caller sp for c2i case.
#ifdef ASSERT
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x544);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
__ g1_write_barrier_pre(noreg, // obj
noreg, // offset
R3_RET, // pre_val
R11_scratch1, // tmp
R12_scratch2, // tmp
true); // needs_frame
__ blr();
// Generate regular method entry.
__ bind(slow_path);
assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
__ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
__ mtctr(R11_scratch1);
__ bctr();
__ flush();
return entry;
} else {
return generate_accessor_entry();
}
}
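// ---------------------------------------------------------------------------
// Illustrative sketch of why the pre-barrier is required: with a SATB
// collector such as G1, reading Reference.referent must log the loaded value
// so the concurrent marker keeps the referent alive even when the only strong
// reference is the one just created. The flag and queue below are simplified
// stand-ins, not G1's data structures.
#include <vector>

struct Oop { };
static bool concurrent_marking_active = false;  // assumed collector state
static std::vector<Oop*> satb_queue;            // assumed per-thread buffer

inline Oop* reference_get(Oop** referent_addr) {
  Oop* value = *referent_addr;                  // the load being intrinsified
  if (concurrent_marking_active && value != nullptr) {
    satb_queue.push_back(value);                // g1_write_barrier_pre(pre_val)
  }
  return value;
}
// ---------------------------------------------------------------------------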
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
// the days we had adapter frames. When we deoptimize a situation where a
// compiled caller calls a compiled caller will have registers it expects
// to survive the call to the callee. If we deoptimize the callee the only
// way we can restore these registers is to have the oldest interpreter
// frame that we create restore these values. That is what this routine
// will accomplish.
// At the moment we have modified c2 to not have any callee save registers
// so this problem does not exist and this routine is just a place holder.
assert(f->is_interpreted_frame(), "must be interpreted");
}

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_INTERPRETER_PPC_HPP
#define CPU_PPC_VM_INTERPRETER_PPC_HPP
public:
// Stack index relative to tos (which points at value)
static int expr_index_at(int i) {
return stackElementWords * i;
}
// Already negated by c++ interpreter
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#ifndef CC_INTERP
#error "CC_INTERP must be defined on PPC64"
#endif
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
// 1 - initializing a new state (thread creation, javaCalls)
// 2 - saving a current state (javaCalls)
// 3 - restoring an old state (javaCalls)
inline void clear(void) {
// clearing _last_Java_sp must be first
_last_Java_sp = NULL;
// fence?
OrderAccess::release();
_last_Java_pc = NULL;
}
inline void set(intptr_t* sp, address pc) {
_last_Java_pc = pc;
OrderAccess::release();
_last_Java_sp = sp;
}
void copy(JavaFrameAnchor* src) {
// In order to make sure the transition state is valid for "this".
// We must clear _last_Java_sp before copying the rest of the new data.
//
// Hack Alert: Temporary bugfix for 4717480/4721647
// To act like previous version (pd_cache_state) don't NULL _last_Java_sp
// unless the value is changing.
if (_last_Java_sp != src->_last_Java_sp) {
_last_Java_sp = NULL;
OrderAccess::release();
}
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true.
OrderAccess::release();
_last_Java_sp = src->_last_Java_sp;
}
// Always walkable.
bool walkable(void) { return true; }
// Never anything to do since we are always walkable and can find the return addresses.
void make_walkable(JavaThread* thread) { }
intptr_t* last_Java_sp(void) const { return _last_Java_sp; }
address last_Java_pc(void) { return _last_Java_pc; }
void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
#endif // CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
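// ---------------------------------------------------------------------------
// Illustrative sketch of the publication protocol above: a sampling thread
// may read (sp, pc) at any time, so the writer orders its stores such that
// whenever sp is non-null, pc is already valid. Modeled with C++ atomics; the
// real code uses plain fields plus OrderAccess::release().
#include <atomic>
#include <cstdint>

struct AnchorSketch {
  std::atomic<std::intptr_t*> sp{nullptr};
  std::atomic<const void*>    pc{nullptr};

  void set(std::intptr_t* s, const void* p) {
    pc.store(p, std::memory_order_relaxed);
    sp.store(s, std::memory_order_release);        // publish last
  }
  void clear() {
    sp.store(nullptr, std::memory_order_release);  // retract first
    pc.store(nullptr, std::memory_order_relaxed);
  }
};
// ---------------------------------------------------------------------------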

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "assembler_ppc.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/safepoint.hpp"
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
// we don't have fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_boolean_field() {
return generate_fast_get_int_field0(T_BOOLEAN);
}
address JNI_FastGetField::generate_fast_get_byte_field() {
return generate_fast_get_int_field0(T_BYTE);
}
address JNI_FastGetField::generate_fast_get_char_field() {
return generate_fast_get_int_field0(T_CHAR);
}
address JNI_FastGetField::generate_fast_get_short_field() {
return generate_fast_get_int_field0(T_SHORT);
}
address JNI_FastGetField::generate_fast_get_int_field() {
return generate_fast_get_int_field0(T_INT);
}
address JNI_FastGetField::generate_fast_get_long_field() {
// we don't have fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
// we don't have fast jni accessors.
return (address) -1;
}
address JNI_FastGetField::generate_fast_get_float_field() {
return generate_fast_get_float_field0(T_FLOAT);
}
address JNI_FastGetField::generate_fast_get_double_field() {
return generate_fast_get_float_field0(T_DOUBLE);
}

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_JNITYPES_PPC_HPP
#define CPU_PPC_VM_JNITYPES_PPC_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "prims/jni.h"
// This file holds platform-dependent routines used to write primitive
// jni types to the array of arguments passed into JavaCalls::call.
class JNITypes : AllStatic {
// These functions write a java primitive type (in native format) to
// a java stack slot array to be passed as an argument to
// JavaCalls::call. I.e., they are functionally 'push' operations
// if they have a 'pos' formal parameter. Note that jlong's and
// jdouble's are written _in reverse_ of the order in which they
// appear in the interpreter stack. This is because call stubs (see
// stubGenerator_sparc.cpp) reverse the argument list constructed by
// JavaCallArguments (see javaCalls.hpp).
private:
#ifndef PPC64
#error "ppc32 support currently not implemented!!!"
#endif // PPC64
public:
// Ints are stored in native format in one JavaCallArgument slot at *to.
static inline void put_int(jint from, intptr_t *to) { *(jint *)(to + 0 ) = from; }
static inline void put_int(jint from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = from; }
static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
// Longs are stored in native format in one JavaCallArgument slot at
// *(to+1).
static inline void put_long(jlong from, intptr_t *to) {
*(jlong*) (to + 1) = from;
}
static inline void put_long(jlong from, intptr_t *to, int& pos) {
*(jlong*) (to + 1 + pos) = from;
pos += 2;
}
static inline void put_long(jlong *from, intptr_t *to, int& pos) {
*(jlong*) (to + 1 + pos) = *from;
pos += 2;
}
// Oops are stored in native format in one JavaCallArgument slot at *to.
static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; }
static inline void put_obj(oop from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = from; }
static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; }
// Floats are stored in native format in one JavaCallArgument slot at *to.
static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; }
static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; }
static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
// Doubles are stored in native word format in one JavaCallArgument
// slot at *(to+1).
static inline void put_double(jdouble from, intptr_t *to) {
*(jdouble*) (to + 1) = from;
}
static inline void put_double(jdouble from, intptr_t *to, int& pos) {
*(jdouble*) (to + 1 + pos) = from;
pos += 2;
}
static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
*(jdouble*) (to + 1 + pos) = *from;
pos += 2;
}
// The get_xxx routines, on the other hand, actually _do_ fetch
// java primitive types from the interpreter stack.
// No need to worry about alignment on PPC64.
static inline jint get_int (intptr_t *from) { return *(jint *) from; }
static inline jlong get_long (intptr_t *from) { return *(jlong *) (from + 1); }
static inline oop get_obj (intptr_t *from) { return *(oop *) from; }
static inline jfloat get_float (intptr_t *from) { return *(jfloat *) from; }
static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + 1); }
};
#endif // CPU_PPC_VM_JNITYPES_PPC_HPP
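// ---------------------------------------------------------------------------
// Illustrative round trip through the two-slot layout: a jlong occupies two
// intptr_t argument slots with the payload in the *second* one, which is why
// put_long stores at (to + 1) and advances pos by 2.
#include <cassert>
#include <cstdint>

int main() {
  std::intptr_t slots[4] = {0, 0, 0, 0};
  int pos = 0;
  long long v = 0x1122334455667788LL;
  *reinterpret_cast<long long*>(slots + 1 + pos) = v;     // put_long
  pos += 2;                                               // long takes 2 slots
  assert(*reinterpret_cast<long long*>(slots + 1) == v);  // get_long(from + 1)
  return pos == 2 ? 0 : 1;
}
// ---------------------------------------------------------------------------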

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_PPC_VM_JNI_PPC_H
#define CPU_PPC_VM_JNI_PPC_H
// Note: please do not change these without also changing jni_md.h in the JDK
// repository
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
#define JNIEXPORT __attribute__((visibility("default")))
#define JNIIMPORT __attribute__((visibility("default")))
#else
#define JNIEXPORT
#define JNIIMPORT
#endif
#define JNICALL
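// Editor's example of a native method declaration using these macros
// (class and method names hypothetical):
//   JNIEXPORT jint JNICALL Java_com_example_Foo_answer(JNIEnv *env, jclass cls);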
typedef int jint;
#if defined(_LP64)
typedef long jlong;
#else
typedef long long jlong;
#endif
typedef signed char jbyte;
#endif // CPU_PPC_VM_JNI_PPC_H

File diff suppressed because it is too large

View File

@ -0,0 +1,676 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
#include "asm/assembler.hpp"
// MacroAssembler extends Assembler by a few frequently used macros.
class ciTypeArray;
class MacroAssembler: public Assembler {
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
//
// Optimized instruction emitters
//
inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
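// Editor's worked example: si31 == 0x12345678 splits into hi == 0x1234 and
// lo == 0x5678; si31 == 0x0001ffff rounds to hi == 0x0002, lo == -1, because
// the low half is consumed as a signed 16-bit displacement:
//   (0x0002 << 16) + (-1) == 0x0001ffff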
// load d = *[a+si31]
// Emits several instructions if the offset is not encodable in one instruction.
void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop);
inline static bool is_ld_largeoffset(address a);
inline static int get_ld_largeoffset_offset(address a);
inline void round_to(Register r, int modulus);
// Load/store with type given by parameter.
void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
// Move register if destination register and target register are different
inline void mr_if_needed(Register rd, Register rs);
inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
// This is dedicated to emitting scheduled mach nodes. For better
// readability of the ad file I put it here.
// Endgroups are not needed if
// - the scheduler is off, or
// - the scheduler found a natural group end; in that case it reduced
//   the size of the instruction used in the test, yielding 'needed'.
inline void endgroup_if_needed(bool needed);
// Memory barriers.
inline void membar(int bits);
inline void release();
inline void acquire();
inline void fence();
// nop padding
void align(int modulus, int max = 252, int rem = 0);
//
// Constants, loading constants, TOC support
//
// Address of the global TOC.
inline static address global_toc();
// Offset of given address to the global TOC.
inline static int offset_to_global_toc(const address addr);
// Address of TOC of the current method.
inline address method_toc();
// Offset of given address to TOC of the current method.
inline int offset_to_method_toc(const address addr);
// Global TOC.
void calculate_address_from_global_toc(Register dst, address addr,
bool hi16 = true, bool lo16 = true,
bool add_relocation = true, bool emit_dummy_addr = false);
inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
calculate_address_from_global_toc(dst, addr, true, false);
};
inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
calculate_address_from_global_toc(dst, addr, false, true);
};
inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
#ifdef _LP64
// Patch narrow oop constant.
inline static bool is_set_narrow_oop(address a, address bound);
static int patch_set_narrow_oop(address a, address bound, narrowOop data);
static narrowOop get_narrow_oop(address a, address bound);
#endif
inline static bool is_load_const_at(address a);
// Emits an oop const to the constant pool, loads the constant, and
// sets a relocation info with address current_pc.
void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
assert(dst == R2_TOC, "base register must be TOC");
load_const_from_method_toc(dst, a, toc);
}
static bool is_load_const_from_method_toc_at(address a);
static int get_offset_of_load_const_from_method_toc_at(address a);
// Get the 64 bit constant from a `load_const' sequence.
static long get_const(address load_const);
// Patch the 64 bit constant of a `load_const' sequence. This is a
// low level procedure. It neither flushes the instruction cache nor
// is it atomic.
static void patch_const(address load_const, long x);
// Metadata in code that we have to keep track of.
AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
AddressLiteral constant_metadata_address(Metadata* obj); // find_index
// Oops used directly in compiled code are stored in the constant pool,
// and loaded from there.
// Allocate new entry for oop in constant pool. Generate relocation.
AddressLiteral allocate_oop_address(jobject obj);
// Find oop obj in constant pool. Return relocation with its index.
AddressLiteral constant_oop_address(jobject obj);
// Find oop in constant pool and emit instructions to load it.
// Uses constant_oop_address.
inline void set_oop_constant(jobject obj, Register d);
// Same as load_address.
inline void set_oop (AddressLiteral obj_addr, Register d);
// Read runtime constant: Issue load if constant not yet established,
// else use real constant.
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
Register tmp,
int offset);
//
// branch, jump
//
inline void pd_patch_instruction(address branch, address target);
NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
// Conditional far branch for destinations encodable in 24+2 bits.
// Same interface as bc, e.g. no inverse boint-field.
enum {
bc_far_optimize_not = 0,
bc_far_optimize_on_relocate = 1
};
// optimize: flag for telling the conditional far branch to optimize
// itself when relocated.
void bc_far(int boint, int biint, Label& dest, int optimize);
// Relocation of conditional far branches.
static bool is_bc_far_at(address instruction_addr);
static address get_dest_of_bc_far_at(address instruction_addr);
static void set_dest_of_bc_far_at(address instruction_addr, address dest);
private:
static bool inline is_bc_far_variant1_at(address instruction_addr);
static bool inline is_bc_far_variant2_at(address instruction_addr);
static bool inline is_bc_far_variant3_at(address instruction_addr);
public:
// Convenience bc_far versions.
inline void blt_far(ConditionRegister crx, Label& L, int optimize);
inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
inline void beq_far(ConditionRegister crx, Label& L, int optimize);
inline void bso_far(ConditionRegister crx, Label& L, int optimize);
inline void bge_far(ConditionRegister crx, Label& L, int optimize);
inline void ble_far(ConditionRegister crx, Label& L, int optimize);
inline void bne_far(ConditionRegister crx, Label& L, int optimize);
inline void bns_far(ConditionRegister crx, Label& L, int optimize);
// Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
private:
enum {
bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord,
bxx64_patchable_ret_addr_offset = bxx64_patchable_size
};
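// Editor's note: with 4-byte instructions this adds up to 7 instructions,
// i.e. bxx64_patchable_size == 28 bytes, and the return address immediately
// follows the sequence.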
void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
static bool is_bxx64_patchable_at( address instruction_addr, bool link);
// Does the instruction use a pc-relative encoding of the destination?
static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link);
// Load destination relative to global toc.
static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link);
static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link);
static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link);
static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
public:
// call
enum {
bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
bl64_patchable_size = bxx64_patchable_size,
bl64_patchable_ret_addr_offset = bxx64_patchable_ret_addr_offset
};
inline void bl64_patchable(address target, relocInfo::relocType rt) {
bxx64_patchable(target, rt, /*link=*/true);
}
inline static bool is_bl64_patchable_at(address instruction_addr) {
return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
}
inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
}
inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
}
inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
}
// jump
enum {
b64_patchable_instruction_count = bxx64_patchable_instruction_count,
b64_patchable_size = bxx64_patchable_size,
};
inline void b64_patchable(address target, relocInfo::relocType rt) {
bxx64_patchable(target, rt, /*link=*/false);
}
inline static bool is_b64_patchable_at(address instruction_addr) {
return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
}
inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
}
inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
}
inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
}
//
// Support for frame handling
//
// some ABI-related functions
void save_nonvolatile_gprs( Register dst_base, int offset);
void restore_nonvolatile_gprs(Register src_base, int offset);
void save_volatile_gprs( Register dst_base, int offset);
void restore_volatile_gprs(Register src_base, int offset);
void save_LR_CR( Register tmp); // tmp contains LR on return.
void restore_LR_CR(Register tmp);
// Get current PC using bl-next-instruction trick.
address get_PC_trash_LR(Register result);
// Resize current frame either relatively wrt to current SP or absolute.
void resize_frame(Register offset, Register tmp);
void resize_frame(int offset, Register tmp);
void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
// Push a frame of size bytes.
void push_frame(Register bytes, Register tmp);
// Push a frame of size `bytes'. No abi space provided.
void push_frame(unsigned int bytes, Register tmp);
// Push a frame of size `bytes' plus abi112 on top.
void push_frame_abi112(unsigned int bytes, Register tmp);
// Set up a new C frame with a spill area for non-volatile GPRs and additional
// space for local variables.
void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
// pop current C frame
void pop_frame();
//
// Calls
//
private:
address _last_calls_return_pc;
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// updates and returns _last_calls_return_pc.
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
public:
// Get the pc where the last call will return to. returns _last_calls_return_pc.
inline address last_calls_return_pc();
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_descriptor);
// For tail calls: only branch, don't link, so callee returns to caller of this function.
address call_c_and_return_to_caller(Register function_descriptor);
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
Register toc);
protected:
// It is imperative that all calls into the VM are handled via the
// call_VM macros. They make sure that the stack linkage is setup
// correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
// while call_VM_leaf's correspond to LEAF entry points.
//
// This is the base routine called by the different versions of
// call_VM. The interpreter may customize this version by overriding
// it for its purposes (e.g., to save/restore additional registers
// when doing a VM call).
//
// If no last_java_sp is specified (noreg) then SP will be used instead.
virtual void call_VM_base(
// where an oop-result ends up if any; use noreg otherwise
Register oop_result,
// to set up last_Java_frame in stubs; use noreg otherwise
Register last_java_sp,
// the entry point
address entry_point,
// flag which indicates if exception should be checked
bool check_exception = true
);
// Support for VM calls. This is the base routine called by the
// different versions of call_VM_leaf. The interpreter may customize
// this version by overriding it for its purposes (e.g., to
// save/restore additional registers when doing a VM call).
void call_VM_leaf_base(address entry_point);
public:
// Call into the VM.
// Passes the thread pointer (in R3_ARG1) as a prepended argument.
// Makes sure oop return values are visible to the GC.
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM_leaf(address entry_point);
void call_VM_leaf(address entry_point, Register arg_1);
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
// Call a stub function via a function descriptor, but don't save
// TOC before call, don't setup TOC and ENV for call, and don't
// restore TOC after call. Updates and returns _last_calls_return_pc.
inline address call_stub(Register function_entry);
inline void call_stub_and_return_to(Register function_entry, Register return_pc);
//
// Java utilities
//
// Read from the polling page, its address is already in a register.
inline void load_from_polling_page(Register polling_page_address, int offset = 0);
// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
address* polling_address_ptr = NULL);
// Check whether instruction is a write access to the memory
// serialization page realized by one of the instructions stw, stwu,
// stwx, or stwux.
static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);
// Support for NULL-checks
//
// Generates code that causes a NULL OS exception if the content of reg is NULL.
// If the accessed location is M[reg + offset] and the offset is known, provide the
// offset. No explicit code generation is needed if the offset is within a certain
// range (0 <= offset <= page_size).
// Stack overflow checking
void bang_stack_with_offset(int offset);
// If instruction is a stack bang of the form ld, stdu, or
// stdux, return the banged address. Otherwise, return 0.
static address get_stack_bang_address(int instruction, void* ucontext);
// Atomics
// CmpxchgX sets condition register to cmpX(current, compare).
// (flag == ne) => (dest_current_value != compare_value), (!swapped)
// (flag == eq) => (dest_current_value == compare_value), ( swapped)
static inline bool cmpxchgx_hint_acquire_lock() { return true; }
// The stxcx will probably not be succeeded by a releasing store.
static inline bool cmpxchgx_hint_release_lock() { return false; }
static inline bool cmpxchgx_hint_atomic_update() { return false; }
// Cmpxchg semantics
enum {
MemBarNone = 0,
MemBarRel = 1,
MemBarAcq = 2,
MemBarFenceAfter = 4 // use powers of 2
};
void cmpxchgw(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, bool contention_hint = false);
void cmpxchgd(ConditionRegister flag,
Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
int semantics, bool cmpxchgx_hint = false,
Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
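// Editor's sketch of a typical call (register names hypothetical): a CAS with
// acquire semantics when taking a lock:
//   cmpxchgd(CCR0, Rcurrent, Rexpected, Rnew, Raddr,
//            MacroAssembler::MemBarAcq, cmpxchgx_hint_acquire_lock());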
// interface method calling
void lookup_interface_method(Register recv_klass,
Register intf_klass,
RegisterOrConstant itable_index,
Register method_result,
Register temp_reg, Register temp2_reg,
Label& no_such_interface);
// virtual method calling
void lookup_virtual_method(Register recv_klass,
RegisterOrConstant vtable_index,
Register method_result);
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow.
// One of the three labels can be NULL, meaning take the fall-through.
// If super_check_offset is -1, the value is loaded up from super_klass.
// No registers are killed, except temp_reg and temp2_reg.
// If super_check_offset is not -1, temp2_reg is not used and can be noreg.
void check_klass_subtype_fast_path(Register sub_klass,
Register super_klass,
Register temp1_reg,
Register temp2_reg,
Label& L_success,
Label& L_failure);
// The rest of the type check; must be wired to a corresponding fast path.
// It does not repeat the fast path logic, so don't use it standalone.
// The temp_reg can be noreg, if no temps are available.
// It can also be sub_klass or super_klass, meaning it's OK to kill that one.
// Updates the sub's secondary super cache as necessary.
void check_klass_subtype_slow_path(Register sub_klass,
Register super_klass,
Register temp1_reg,
Register temp2_reg,
Label* L_success = NULL,
Register result_reg = noreg);
// Simplified, combined version, good for typical uses.
// Falls through on failure.
void check_klass_subtype(Register sub_klass,
Register super_klass,
Register temp1_reg,
Register temp2_reg,
Label& L_success);
// Method handle support (JSR 292).
void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
// Biased locking support
// Upon entry, obj_reg must contain the target object, and mark_reg
// must contain the target object's header.
// Destroys mark_reg if an attempt is made to bias an anonymously
// biased lock. In this case a failure will go either to the slow
// case or fall through with the notEqual condition code set with
// the expectation that the slow case in the runtime will be called.
// In the fall-through case where the CAS-based lock is done,
// mark_reg is not destroyed.
void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
Register temp2_reg, Label& done, Label* slow_case = NULL);
// Upon entry, the base register of mark_addr must contain the oop.
// Destroys temp_reg.
void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
// Support for serializing memory accesses between threads
void serialize_memory(Register thread, Register tmp1, Register tmp2);
// GC barrier support.
void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
#ifndef SERIALGC
// General G1 pre-barrier generator.
void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
Register Rtmp1, Register Rtmp2, bool needs_frame = false);
// General G1 post-barrier generator
void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
#endif // SERIALGC
// Support for managing the JavaThread pointer (i.e., the reference to
// thread-local information).
// Support for last Java frame (but use call_VM instead where possible):
// access R16_thread->last_Java_sp.
void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
void reset_last_Java_frame(void);
void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
// Read vm result from thread: oop_result = R16_thread->result;
void get_vm_result (Register oop_result);
void get_vm_result_2(Register metadata_result);
static bool needs_explicit_null_check(intptr_t offset);
// Trap-instruction-based checks.
// Range checks can be distinguished from zero checks as they check 32 bit,
// zero checks all 64 bits (tw, td).
inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
static bool is_trap_null_check(int x) {
return is_tdi(x, traptoEqual, -1/*any reg*/, 0) ||
is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
}
inline void trap_zombie_not_entrant();
static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
inline void trap_should_not_reach_here();
static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
inline void trap_ic_miss_check(Register a, Register b);
static bool is_trap_ic_miss_check(int x) {
return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
}
// Implicit or explicit null check, jumps to static address exception_entry.
inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
// Check accessed object for null. Use SIGTRAP-based null checks on AIX.
inline void load_with_trap_null_check(Register d, int si16, Register s1);
// Load heap oop and decompress. Loaded oop may not be null.
inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
// Null allowed.
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
inline void encode_heap_oop_not_null(Register d);
inline void decode_heap_oop_not_null(Register d);
// Null allowed.
inline void decode_heap_oop(Register d);
// Load/Store klass oop from klass field. Compress.
void load_klass(Register dst, Register src);
void load_klass_with_trap_null_check(Register dst, Register src);
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
void encode_klass_not_null(Register dst, Register src = noreg);
// Load common heap base into register.
void reinit_heapbase(Register d, Register tmp = noreg);
// SIGTRAP-based range checks for arrays.
inline void trap_range_check_l(Register a, Register b);
inline void trap_range_check_l(Register a, int si16);
static bool is_trap_range_check_l(int x) {
return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) );
}
inline void trap_range_check_le(Register a, int si16);
static bool is_trap_range_check_le(int x) {
return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
}
inline void trap_range_check_g(Register a, int si16);
static bool is_trap_range_check_g(int x) {
return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
}
inline void trap_range_check_ge(Register a, Register b);
inline void trap_range_check_ge(Register a, int si16);
static bool is_trap_range_check_ge(int x) {
return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) );
}
static bool is_trap_range_check(int x) {
return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
is_trap_range_check_g(x) || is_trap_range_check_ge(x);
}
void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);
// Needle of length 1.
void string_indexof_1(Register result, Register haystack, Register haycnt,
Register needle, jchar needleChar,
Register tmp1, Register tmp2);
// General indexof, eventually with constant needle length.
void string_indexof(Register result, Register haystack, Register haycnt,
Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
Register tmp1, Register tmp2, Register tmp3, Register tmp4);
void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
Register result_reg, Register tmp_reg);
void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
Register tmp5_reg);
void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
Register tmp1_reg, Register tmp2_reg);
//
// Debugging
//
// assert on cr0
void asm_assert(bool check_equal, const char* msg, int id);
void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
private:
void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
const char* msg, int id);
public:
void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id);
}
void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
}
// Verify R16_thread contents.
void verify_thread();
// Emit code to verify that reg contains a valid oop if +VerifyOops is set.
void verify_oop(Register reg, const char* s = "broken oop");
// TODO: verify method and klass metadata (compare against vptr?)
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
private:
enum {
stop_stop = 0,
stop_untested = 1,
stop_unimplemented = 2,
stop_shouldnotreachhere = 3,
stop_end = 4
};
void stop(int type, const char* msg, int id);
public:
// Prints msg, dumps registers and stops execution.
void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
void should_not_reach_here() { stop(stop_shouldnotreachhere, "", -1); }
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

View File

@ -0,0 +1,388 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"
inline bool MacroAssembler::is_ld_largeoffset(address a) {
const int inst1 = *(int *)a;
const int inst2 = *(int *)(a+4);
return (is_ld(inst1)) ||
(is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
}
inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");
const int inst1 = *(int *)a;
if (is_ld(inst1)) {
return inv_d1_field(inst1);
} else {
const int inst2 = *(int *)(a+4);
return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
}
}
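// Editor's worked example: for the two-instruction form
//   addis d, a, 0x1234
//   ld    d, 0x5678(d)
// is_ld_largeoffset() matches and get_ld_largeoffset_offset() returns
// (0x1234 << 16) + 0x5678 == 0x12345678.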
inline void MacroAssembler::round_to(Register r, int modulus) {
assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
addi(r, r, modulus-1);
clrrdi(r, r, log2_long((jlong)modulus));
}
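// Editor's worked example: round_to(r, 16) with r == 25 first forms
// 25 + 15 == 40, then clears the low log2(16) == 4 bits, yielding 32.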
// Move register if destination register and target register are different.
inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
if (rs != rd) mr(rd, rs);
}
inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
if (rs != rd) fmr(rd, rs);
}
inline void MacroAssembler::endgroup_if_needed(bool needed) {
if (needed) {
endgroup();
}
}
inline void MacroAssembler::membar(int bits) {
// TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release
// (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||))
if (bits & StoreLoad) sync(); else lwsync();
}
inline void MacroAssembler::release() { membar(LoadStore | StoreStore); }
inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); }
inline void MacroAssembler::fence() { membar(LoadLoad | LoadStore | StoreLoad | StoreStore); }
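// Editor's sketch (registers hypothetical) of a release-store/load-acquire
// pairing built from these helpers:
//   release();             // lwsync before the publishing store
//   std(Rval, 0, Raddr);
//   ...
//   ld(Rval, 0, Raddr);
//   acquire();             // lwsync after the consuming load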
// Address of the global TOC.
inline address MacroAssembler::global_toc() {
return CodeCache::low_bound();
}
// Offset of given address to the global TOC.
inline int MacroAssembler::offset_to_global_toc(const address addr) {
intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
return (int)offset;
}
// Address of current method's TOC.
inline address MacroAssembler::method_toc() {
return code()->consts()->start();
}
// Offset of given address to current method's TOC.
inline int MacroAssembler::offset_to_method_toc(address addr) {
intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
return (int)offset;
}
inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
const address inst2_addr = a;
const int inst2 = *(int *) a;
// The relocation points to the second instruction, the addi.
if (!is_addi(inst2)) return false;
// The addi reads and writes the same register dst.
const int dst = inv_rt_field(inst2);
if (inv_ra_field(inst2) != dst) return false;
// Now, find the preceding addis which writes to dst.
int inst1 = 0;
address inst1_addr = inst2_addr - BytesPerInstWord;
while (inst1_addr >= bound) {
inst1 = *(int *) inst1_addr;
if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
// stop, found the addis which writes dst
break;
}
inst1_addr -= BytesPerInstWord;
}
if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
return is_addis(inst1);
}
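// Editor's sketch of the instruction pattern matched above (offsets
// hypothetical); the relocation points at the addi:
//   addis dst, R29/*global toc*/, hi16
//   ...                             // unrelated instructions may intervene
//   addi  dst, dst, lo16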
#ifdef _LP64
// Detect narrow oop constants.
inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
const address inst2_addr = a;
const int inst2 = *(int *)a;
// The relocation points to the second instruction, the ori.
if (!is_ori(inst2)) return false;
// The ori reads and writes the same register dst.
const int dst = inv_rta_field(inst2);
if (inv_rs_field(inst2) != dst) return false;
// Now, find the preceding addis which writes to dst.
int inst1 = 0;
address inst1_addr = inst2_addr - BytesPerInstWord;
while (inst1_addr >= bound) {
inst1 = *(int *) inst1_addr;
if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
inst1_addr -= BytesPerInstWord;
}
return false;
}
#endif
inline bool MacroAssembler::is_load_const_at(address a) {
const int* p_inst = (int *) a;
bool b = is_lis(*p_inst++);
if (is_ori(*p_inst)) {
p_inst++;
b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
b = b && is_oris(*p_inst++);
b = b && is_ori(*p_inst);
} else if (is_lis(*p_inst)) {
p_inst++;
b = b && is_ori(*p_inst++);
b = b && is_ori(*p_inst);
// TODO: could enhance reliability by adding is_insrdi
} else return false;
return b;
}
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
set_oop(constant_oop_address(obj), d);
}
inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
load_const(d, obj_addr);
}
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
jint& stub_inst = *(jint*) branch;
stub_inst = patched_branch(target - branch, stub_inst, 0);
}
// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
// Variant 1, the 1st instruction contains the destination address:
//
// bcxx DEST
// endgroup
//
const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4);
return is_bcxx(instruction_1) &&
(inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
is_endgroup(instruction_2);
}
// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
// Variant 2, the 2nd instruction contains the destination address:
//
// b!cxx SKIP
// bxx DEST
// SKIP:
//
const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4);
return is_bcxx(instruction_1) &&
(inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
is_bxx(instruction_2);
}
// Relocation for conditional branches
inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
// Variant 3, far cond branch to the next instruction, already patched to nops:
//
// nop
// endgroup
// SKIP/DEST:
//
const int instruction_1 = *(int*)(instruction_addr);
const int instruction_2 = *(int*)(instruction_addr + 4);
return is_nop(instruction_1) &&
is_endgroup(instruction_2);
}
// Convenience bc_far versions
inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }
inline address MacroAssembler::call_stub(Register function_entry) {
mtctr(function_entry);
bctrl();
return pc();
}
inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
assert_different_registers(function_entry, return_pc);
mtlr(return_pc);
mtctr(function_entry);
bctr();
}
// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
return _last_calls_return_pc;
}
// Read from the polling page, its address is already in a register.
inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
ld(R0, offset, polling_page_address);
}
// Trap-instruction-based checks.
inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
assert(TrapBasedNullChecks, "sanity");
tdi(cmp, a/*reg a*/, 0);
}
inline void MacroAssembler::trap_zombie_not_entrant() {
tdi(traptoUnconditional, 0/*reg 0*/, 1);
}
inline void MacroAssembler::trap_should_not_reach_here() {
tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
}
inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
}
// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
// Either issue a trap instruction that raises SIGTRAP, or do a compare that
// branches to exception_entry.
// No support for compressed oops (base page of heap). Does not distinguish
// loads and stores.
inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg,
address exception_entry) {
if (!ImplicitNullChecks || needs_explicit_null_check(offset) || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
assert(UseSIGTRAP, "sanity");
trap_null_check(a);
} else {
Label ok;
cmpdi(CCR0, a, 0);
bne(CCR0, ok);
load_const_optimized(temp_reg, exception_entry);
mtctr(temp_reg);
bctr();
bind(ok);
}
}
}
inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Register s1) {
if (!os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
trap_null_check(s1);
}
}
ld(d, si16, s1);
}
inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
if (UseCompressedOops) {
lwz(d, offs, s1);
// Attention: no null check here!
decode_heap_oop_not_null(d);
} else {
ld(d, offs, s1);
}
}
inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
if (UseCompressedOops) {
lwz(d, offs, s1);
decode_heap_oop(d);
} else {
ld(d, offs, s1);
}
}
inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
if (Universe::narrow_oop_base() != NULL) {
sub(d, d, R30);
}
if (Universe::narrow_oop_shift() != 0) {
srdi(d, d, LogMinObjAlignmentInBytes);
}
}
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
if (Universe::narrow_oop_shift() != 0) {
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sldi(d, d, LogMinObjAlignmentInBytes);
}
if (Universe::narrow_oop_base() != NULL) {
add(d, d, R30);
}
}
inline void MacroAssembler::decode_heap_oop(Register d) {
Label isNull;
if (Universe::narrow_oop_base() != NULL) {
cmpwi(CCR0, d, 0);
beq(CCR0, isNull);
}
if (Universe::narrow_oop_shift() != 0) {
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
sldi(d, d, LogMinObjAlignmentInBytes);
}
if (Universe::narrow_oop_base() != NULL) {
add(d, d, R30);
}
bind(isNull);
}
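// Editor's note on the scheme above: with heap base B (held in R30) and shift
// s == LogMinObjAlignmentInBytes, decode computes oop == B + (narrow << s)
// and encode computes narrow == (oop - B) >> s.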
// SIGTRAP-based range checks for arrays.
inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
tw (traptoLessThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
twi(traptoLessThanUnsigned, a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
twi(traptoEqual | traptoLessThanUnsigned, a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
twi(traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "asm/codeBuffer.hpp"
#include "memory/metaspaceShared.hpp"
// Generate the self-patching vtable method:
//
// This method will be called (as any other Klass virtual method) with
// the Klass itself as the first argument. Example:
//
// oop obj;
// int size = obj->klass()->klass_part()->oop_size(this);
//
// for which the virtual method call is Klass::oop_size();
//
// The dummy method is called with the Klass object as the first
// operand, and an object as the second argument.
//
//=====================================================================
// All of the dummy methods in the vtable are essentially identical,
// differing only by an ordinal constant, and they bear no relationship
// to the original method which the caller intended. Also, there need
// to be 'vtbl_list_size' instances of the vtable in order to
// differentiate between the 'vtbl_list_size' original Klass objects.
void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
void** vtable,
char** md_top,
char* md_end,
char** mc_top,
char* mc_end) {
Unimplemented();
}

View File

@ -0,0 +1,543 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
#define __ _masm->
#ifdef CC_INTERP
#define EXCEPTION_ENTRY StubRoutines::throw_NullPointerException_at_call_entry()
#else
#define EXCEPTION_ENTRY Interpreter::throw_NullPointerException_entry()
#endif
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
inline static RegisterOrConstant constant(int value) {
return RegisterOrConstant(value);
}
void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
if (VerifyMethodHandles)
verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
"MH argument is a Class");
__ ld(klass_reg, java_lang_Class::klass_offset_in_bytes(), klass_reg);
}
#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
assert(x != 0, err_msg("%s should be nonzero", xname));
return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT
#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message) {
Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
Label L_ok, L_bad;
BLOCK_COMMENT("verify_klass {");
__ verify_oop(obj_reg);
__ cmpdi(CCR0, obj_reg, 0);
__ beq(CCR0, L_bad);
__ load_klass(temp_reg, obj_reg);
__ load_const_optimized(temp2_reg, (address) klass_addr);
__ ld(temp2_reg, 0, temp2_reg);
__ cmpd(CCR0, temp_reg, temp2_reg);
__ beq(CCR0, L_ok);
__ ld(temp_reg, klass->super_check_offset(), temp_reg);
__ cmpd(CCR0, temp_reg, temp2_reg);
__ beq(CCR0, L_ok);
__ BIND(L_bad);
__ stop(error_message);
__ BIND(L_ok);
BLOCK_COMMENT("} verify_klass");
}
void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
Label L;
BLOCK_COMMENT("verify_ref_kind {");
__ load_sized_value(temp, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()), member_reg,
sizeof(u4), /*is_signed*/ false);
// assert(sizeof(u4) == sizeof(java.lang.invoke.MemberName.flags), "");
__ srwi( temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
__ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
__ cmpwi(CCR1, temp, ref_kind);
__ beq(CCR1, L);
{ char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
if (ref_kind == JVM_REF_invokeVirtual ||
ref_kind == JVM_REF_invokeSpecial)
// could do this for all ref_kinds, but would explode assembly code size
trace_method_handle(_masm, buf);
__ stop(buf);
}
BLOCK_COMMENT("} verify_ref_kind");
__ BIND(L);
}
#endif // ASSERT
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
bool for_compiler_entry) {
assert(method == R19_method, "interpreter calling convention");
assert_different_registers(method, target, temp);
if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code;
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
__ verify_thread();
__ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
__ cmplwi(CCR0, temp, 0);
__ beq(CCR0, run_compiled_code);
__ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
__ mtctr(target);
__ bctr();
__ BIND(run_compiled_code);
}
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ ld(target, in_bytes(entry_offset), R19_method);
__ mtctr(target);
__ bctr();
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry) {
BLOCK_COMMENT("jump_to_lambda_form {");
// This is the initial entry point of a lazy method handle.
// After type checking, it picks up the invoker from the LambdaForm.
assert_different_registers(recv, method_temp, temp2); // temp3 is only passed on
assert(method_temp == R19_method, "required register for loading method");
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv);
__ verify_oop(method_temp);
__ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp);
__ verify_oop(method_temp);
// the following assumes that a Method* is normally compressed in the vmtarget field:
__ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
if (VerifyMethodHandles && !for_compiler_entry) {
// make sure recv is already on stack
__ ld(temp2, in_bytes(Method::const_offset()), method_temp);
__ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
Label L;
__ ld(temp2, __ argument_offset(temp2, temp2, 0), CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
__ cmpd(CCR1, temp2, recv);
__ beq(CCR1, L);
__ stop("receiver not on stack");
__ BIND(L);
}
jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
BLOCK_COMMENT("} jump_to_lambda_form");
}
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
vmIntrinsics::ID iid) {
const bool not_for_compiler_entry = false; // this is the interpreter entry
assert(is_signature_polymorphic(iid), "expected invoke iid");
if (iid == vmIntrinsics::_invokeGeneric ||
iid == vmIntrinsics::_compiledLambdaForm) {
// Perhaps surprisingly, the symbolic references visible to Java are not directly used.
// They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
// They all allow an appendix argument.
__ stop("Should not reach here"); // empty stubs make SG sick
return NULL;
}
Register argbase = CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp); // parameter (preserved)
Register argslot = R3;
Register temp1 = R6;
Register param_size = R7;
// here's where control starts out:
__ align(CodeEntryAlignment);
address entry_point = __ pc();
if (VerifyMethodHandles) {
Label L;
BLOCK_COMMENT("verify_intrinsic_id {");
__ load_sized_value(temp1, Method::intrinsic_id_offset_in_bytes(), R19_method,
sizeof(u1), /*is_signed*/ false);
// assert(sizeof(u1) == sizeof(Method::_intrinsic_id), "");
__ cmpwi(CCR1, temp1, (int) iid);
__ beq(CCR1, L);
if (iid == vmIntrinsics::_linkToVirtual ||
iid == vmIntrinsics::_linkToSpecial) {
// could do this for all kinds, but would explode assembly code size
trace_method_handle(_masm, "bad Method*:intrinsic_id");
}
__ stop("bad Method*::intrinsic_id");
__ BIND(L);
BLOCK_COMMENT("} verify_intrinsic_id");
}
// First task: Find out how big the argument list is.
int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
__ ld(param_size, in_bytes(Method::const_offset()), R19_method);
__ load_sized_value(param_size, in_bytes(ConstMethod::size_of_parameters_offset()), param_size,
sizeof(u2), /*is_signed*/ false);
// assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
} else {
DEBUG_ONLY(param_size = noreg);
}
Register tmp_mh = noreg;
if (!is_signature_polymorphic_static(iid)) {
__ ld(tmp_mh = temp1, __ argument_offset(param_size, param_size, 0), argbase);
DEBUG_ONLY(param_size = noreg);
}
if (TraceMethodHandles) {
if (tmp_mh != noreg)
__ mr(R23_method_handle, tmp_mh); // make stub happy
trace_method_handle_interpreter_entry(_masm, iid);
}
if (iid == vmIntrinsics::_invokeBasic) {
generate_method_handle_dispatch(_masm, iid, tmp_mh, noreg, not_for_compiler_entry);
} else {
// Adjust argument list by popping the trailing MemberName argument.
Register tmp_recv = noreg;
if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
// Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
__ ld(tmp_recv = temp1, __ argument_offset(param_size, param_size, 0), argbase);
DEBUG_ONLY(param_size = noreg);
}
Register R19_member = R19_method; // MemberName ptr; incoming method ptr is dead now
__ ld(R19_member, RegisterOrConstant((intptr_t)8), argbase);
__ add(argbase, Interpreter::stackElementSize, argbase);
generate_method_handle_dispatch(_masm, iid, tmp_recv, R19_member, not_for_compiler_entry);
}
return entry_point;
}
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
vmIntrinsics::ID iid,
Register receiver_reg,
Register member_reg,
bool for_compiler_entry) {
assert(is_signature_polymorphic(iid), "expected invoke iid");
Register temp1 = (for_compiler_entry ? R25_tmp5 : R7);
Register temp2 = (for_compiler_entry ? R22_tmp2 : R8);
Register temp3 = (for_compiler_entry ? R23_tmp3 : R9);
Register temp4 = (for_compiler_entry ? R24_tmp4 : R10);
if (receiver_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
if (member_reg != noreg) assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
if (iid == vmIntrinsics::_invokeBasic) {
// indirect through MH.form.vmentry.vmtarget
jump_to_lambda_form(_masm, receiver_reg, R19_method, temp1, temp2, for_compiler_entry);
} else {
// The method is a member invoker used by direct method handles.
if (VerifyMethodHandles) {
// make sure the trailing argument really is a MemberName (caller responsibility)
verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
temp1, temp2,
"MemberName required for invokeVirtual etc.");
}
Register temp1_recv_klass = temp1;
if (iid != vmIntrinsics::_linkToStatic) {
__ verify_oop(receiver_reg);
if (iid == vmIntrinsics::_linkToSpecial) {
// Don't actually load the klass; just null-check the receiver.
__ null_check_throw(receiver_reg, -1, temp1, EXCEPTION_ENTRY);
} else {
// load receiver klass itself
__ null_check_throw(receiver_reg, oopDesc::klass_offset_in_bytes(), temp1, EXCEPTION_ENTRY);
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
BLOCK_COMMENT("check_receiver {");
// The receiver for the MemberName must be in receiver_reg.
// Check the receiver against the MemberName.clazz
if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
// Did not load it above...
__ load_klass(temp1_recv_klass, receiver_reg);
__ verify_klass_ptr(temp1_recv_klass);
}
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
Label L_ok;
Register temp2_defc = temp2;
__ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
// If we get here, the type check failed!
__ stop("receiver class disagrees with MemberName.clazz");
__ BIND(L_ok);
}
BLOCK_COMMENT("} check_receiver");
}
if (iid == vmIntrinsics::_linkToSpecial ||
iid == vmIntrinsics::_linkToStatic) {
DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass
}
// Live registers at this point:
// member_reg - MemberName that was the trailing argument
// temp1_recv_klass - klass of stacked receiver, if needed
//  saved SP  - interpreter linkage (if interpreted; "O5_savedSP" in the SPARC template)
//  argument registers - compiler arguments (if compiled; "O0..O5" in the SPARC template)
Label L_incompatible_class_change_error;
switch (iid) {
case vmIntrinsics::_linkToSpecial:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
break;
case vmIntrinsics::_linkToStatic:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
}
__ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
break;
case vmIntrinsics::_linkToVirtual:
{
// same as TemplateTable::invokevirtual,
// minus the CP setup and profiling:
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
}
// pick out the vtable index from the MemberName, and then we can discard it:
Register temp2_index = temp2;
__ ld(temp2_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
if (VerifyMethodHandles) {
Label L_index_ok;
__ cmpdi(CCR1, temp2_index, 0);
__ bge(CCR1, L_index_ok);
__ stop("no virtual index");
__ BIND(L_index_ok);
}
// Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
// at this point. And VerifyMethodHandles has already checked clazz, if needed.
// get target Method* & entry point
__ lookup_virtual_method(temp1_recv_klass, temp2_index, R19_method);
break;
}
case vmIntrinsics::_linkToInterface:
{
// same as TemplateTable::invokeinterface
// (minus the CP setup and profiling, with different argument motion)
if (VerifyMethodHandles) {
verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
}
Register temp2_intf = temp2;
__ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
__ verify_klass_ptr(temp2_intf);
Register vtable_index = R19_method;
__ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
if (VerifyMethodHandles) {
Label L_index_ok;
__ cmpdi(CCR1, vtable_index, 0);
__ bge(CCR1, L_index_ok);
__ stop("invalid vtable index for MH.invokeInterface");
__ BIND(L_index_ok);
}
// given intf, index, and recv klass, dispatch to the implementation method
__ lookup_interface_method(temp1_recv_klass, temp2_intf,
// note: next two args must be the same:
vtable_index, R19_method,
temp3, temp4,
L_incompatible_class_change_error);
break;
}
default:
fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
break;
}
// Live at this point:
//   R19_method
//   saved interpreter SP (if interpreted; "O5_savedSP" in the SPARC template)
// After figuring out which concrete method to call, jump into it.
// Note that this works in the interpreter with no data motion.
// But the compiled version will require that the receiver register
// (rcx_recv in the x86 template) be shifted out.
__ verify_method_ptr(R19_method);
jump_from_method_handle(_masm, R19_method, temp1, temp2, for_compiler_entry);
if (iid == vmIntrinsics::_linkToInterface) {
__ BIND(L_incompatible_class_change_error);
__ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry());
__ mtctr(temp1);
__ bctr();
}
}
}
#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
oopDesc* mh,
intptr_t* entry_sp,
intptr_t* saved_regs) {
bool has_mh = (strstr(adaptername, "/static") == NULL &&
strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23"; // "G23" is inherited from the SPARC template
tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
if (Verbose) {
tty->print_cr("Registers:");
const int abi_offset = frame::abi_112_size / 8;
for (int i = R3->encoding(); i <= R12->encoding(); i++) {
Register r = as_Register(i);
int count = i - R3->encoding();
// The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
if ((count + 1) % 4 == 0) {
tty->cr();
} else {
tty->print(", ");
}
}
tty->cr();
{
// dumping last frame with frame::describe
JavaThread* p = JavaThread::active();
ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
// While trace_method_handle creates a frame, it may be entered
// without a PC on the stack top (e.g. not just after a call).
// Walking that frame could lead to failures due to that invalid PC.
// => carefully detect that frame when doing the stack walking
// Current C frame
frame cur_frame = os::current_frame();
// Robust search of trace_calling_frame (independent of inlining).
// Assumes saved_regs comes from a pusha in the trace_calling_frame.
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
while (trace_calling_frame.fp() < saved_regs) {
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
// Safely create a frame and call frame::describe.
intptr_t *dump_sp = trace_calling_frame.sender_sp();
frame dump_frame = frame(dump_sp);
dump_frame.describe(values, 1);
values.describe(-1, saved_regs, "raw top of stack");
tty->print_cr("Stack layout:");
values.print(p);
}
if (has_mh && mh->is_oop()) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
java_lang_invoke_MethodHandle::form(mh)->print();
}
}
}
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
int nbytes_save = 10 * 8; // 10 volatile gprs
__ save_LR_CR(R0);
__ mr(R0, R1_SP); // saved_sp
assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
// push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bits
__ push_frame_abi112(nbytes_save, R0);
__ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
__ load_const(R3_ARG1, (address)adaptername);
__ mr(R4_ARG2, R23_method_handle);
__ mr(R5_ARG3, R0); // saved_sp
__ mr(R6_ARG4, R1_SP);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ restore_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
__ pop_frame();
__ restore_LR_CR(R0);
BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Platform-specific definitions for method handles.
// These definitions are inlined into class MethodHandles.
// Adapters
//static unsigned int adapter_code_size() {
// return 32*K DEBUG_ONLY(+ 16*K) + (TraceMethodHandles ? 16*K : 0) + (VerifyMethodHandles ? 32*K : 0);
//}
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
};
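// For example, an LP64 debug build reserves 32000 + 150000 bytes of adapter
// code space here, while a 32-bit debug build reserves 16000 + 25000.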
// Additional helper methods for MethodHandles code generation:
public:
static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
static void verify_klass(MacroAssembler* _masm,
Register obj_reg, SystemDictionary::WKID klass_id,
Register temp_reg, Register temp2_reg,
const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
Register temp_reg, Register temp2_reg) {
Unimplemented();
}
static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
// Similar to InterpreterMacroAssembler::jump_from_interpreted.
// Takes care of special dispatch from single stepping too.
static void jump_from_method_handle(MacroAssembler* _masm, Register method,
Register temp, Register temp2,
bool for_compiler_entry);
static void jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2, Register temp3,
bool for_compiler_entry);

View File

@ -0,0 +1,391 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
// We use an illtrap for marking a method as not_entrant or zombie iff !UseSIGTRAP
// Work around a C++ compiler bug which changes 'this'
bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) {
assert(!UseSIGTRAP, "precondition");
if (*(int*)addr != 0 /*illtrap*/) return false;
CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
if (cb == NULL || !cb->is_nmethod()) return false;
nmethod *nm = (nmethod *)cb;
// This method is not_entrant or zombie iff the illtrap instruction is
// located at the verified entry point.
return nm->verified_entry_point() == addr;
}
#ifdef ASSERT
void NativeInstruction::verify() {
// Make sure code pattern is actually an instruction address.
address addr = addr_at(0);
if (addr == 0 || ((intptr_t)addr & 3) != 0) {
fatal("not an instruction address");
}
}
#endif // ASSERT
// Extract call destination from a NativeCall. The call might use a trampoline stub.
address NativeCall::destination() const {
address addr = (address)this;
address destination = Assembler::bxx_destination(addr);
// Do we use a trampoline stub for this call?
CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // Else we get assertion if nmethod is zombie.
assert(cb && cb->is_nmethod(), "sanity");
nmethod *nm = (nmethod *)cb;
if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
// Yes we do, so get the destination from the trampoline stub.
const address trampoline_stub_addr = destination;
destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination(nm);
}
return destination;
}
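// Illustrative sketch (not part of this file's API surface) of resolving the
// real target of a patched call site; 'call_pc' is a hypothetical address of
// a bl instruction inside an nmethod:
//
//   NativeCall* call = nativeCall_at(call_pc);
//   address target = call->destination(); // follows the trampoline stub, if any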
// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times. Thus, the displacement field must be
// instruction-word-aligned.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
assert(!assert_lock ||
(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
"concurrent code patching");
ResourceMark rm;
int code_size = 1 * BytesPerInstWord;
address addr_call = addr_at(0);
assert(MacroAssembler::is_bl(*(int*)addr_call), "unexpected code at call-site");
CodeBuffer cb(addr_call, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
// Patch the call.
if (ReoptimizeCallSequences &&
a->is_within_range_of_b(dest, addr_call)) {
a->bl(dest);
} else {
address trampoline_stub_addr = get_trampoline();
// We did not find a trampoline stub because the current codeblob
// does not provide this information. The branch will be patched
// later during a final fixup, when all necessary information is
// available.
if (trampoline_stub_addr == 0)
return;
// Patch the constant in the call's trampoline stub.
NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
a->bl(trampoline_stub_addr);
}
ICache::ppc64_flush_icache_bytes(addr_call, code_size);
}
address NativeCall::get_trampoline() {
address call_addr = addr_at(0);
CodeBlob *code = CodeCache::find_blob(call_addr);
assert(code != NULL, "Could not find the containing code blob");
// There are no relocations available when the code gets relocated
// because of CodeBuffer expansion.
if (code->relocation_size() == 0)
return NULL;
address bl_destination = Assembler::bxx_destination(call_addr);
if (code->content_contains(bl_destination) &&
is_NativeCallTrampolineStub_at(bl_destination))
return bl_destination;
// If the codeBlob is not a nmethod, this is because we get here from the
// CodeBlob constructor, which is called within the nmethod constructor.
return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}
#ifdef ASSERT
void NativeCall::verify() {
address addr = addr_at(0);
if (!NativeCall::is_call_at(addr)) {
tty->print_cr("not a NativeCall at " PTR_FORMAT, addr);
// TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr));
}
}
#endif // ASSERT
#ifdef ASSERT
void NativeFarCall::verify() {
address addr = addr_at(0);
NativeInstruction::verify();
if (!NativeFarCall::is_far_call_at(addr)) {
tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr);
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr));
}
}
#endif // ASSERT
address NativeMovConstReg::next_instruction_address() const {
#ifdef ASSERT
CodeBlob* nm = CodeCache::find_blob(instruction_address());
assert(!MacroAssembler::is_set_narrow_oop(addr_at(0), nm->content_begin()), "Should not patch narrow oop here");
#endif
if (MacroAssembler::is_load_const_from_method_toc_at(addr_at(0))) {
return addr_at(load_const_from_method_toc_instruction_size);
} else {
return addr_at(load_const_instruction_size);
}
}
intptr_t NativeMovConstReg::data() const {
address addr = addr_at(0);
if (MacroAssembler::is_load_const_at(addr)) {
return MacroAssembler::get_const(addr);
}
CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
} else {
assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
address ctable = cb->content_begin();
int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
return *(intptr_t *)(ctable + offset);
}
}
address NativeMovConstReg::set_data_plain(intptr_t data, CodeBlob *cb) {
address addr = instruction_address();
address next_address = NULL;
if (!cb) cb = CodeCache::find_blob(addr);
if (cb != NULL && MacroAssembler::is_load_const_from_method_toc_at(addr)) {
// A load from the method's TOC (ctable).
assert(cb->is_nmethod(), "must be nmethod");
const address ctable = cb->content_begin();
const int toc_offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
*(intptr_t *)(ctable + toc_offset) = data;
next_address = addr + BytesPerInstWord;
} else if (cb != NULL &&
MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) {
// A calculation relative to the global TOC.
if (MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, cb->content_begin()) !=
(address)data) {
const int invalidated_range =
MacroAssembler::patch_calculate_address_from_global_toc_at(addr, cb->content_begin(),
(address)data);
const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
// FIXME:
const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
ICache::ppc64_flush_icache_bytes(start, range);
}
next_address = addr + 1 * BytesPerInstWord;
} else if (MacroAssembler::is_load_const_at(addr)) {
// A normal 5 instruction load_const code sequence.
if (MacroAssembler::get_const(addr) != (long)data) {
// This is not mt safe, ok in methods like CodeBuffer::copy_code().
MacroAssembler::patch_const(addr, (long)data);
ICache::ppc64_flush_icache_bytes(addr, load_const_instruction_size);
}
next_address = addr + 5 * BytesPerInstWord;
} else if (MacroAssembler::is_bl(* (int*) addr)) {
// A single branch-and-link instruction.
ResourceMark rm;
const int code_size = 1 * BytesPerInstWord;
CodeBuffer cb(addr, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
a->bl((address) data);
ICache::ppc64_flush_icache_bytes(addr, code_size);
next_address = addr + code_size;
} else {
ShouldNotReachHere();
}
return next_address;
}
void NativeMovConstReg::set_data(intptr_t data) {
// Store the value into the instruction stream.
CodeBlob *cb = CodeCache::find_blob(instruction_address());
address next_address = set_data_plain(data, cb);
// Also store the value into an oop_Relocation cell, if any.
if (cb && cb->is_nmethod()) {
RelocIterator iter((nmethod *) cb, instruction_address(), next_address);
oop* oop_addr = NULL;
Metadata** metadata_addr = NULL;
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation *r = iter.oop_reloc();
if (oop_addr == NULL) {
oop_addr = r->oop_addr();
*oop_addr = cast_to_oop(data);
} else {
assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
}
}
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation *r = iter.metadata_reloc();
if (metadata_addr == NULL) {
metadata_addr = r->metadata_addr();
*metadata_addr = (Metadata*)data;
} else {
assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
}
}
}
}
}
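// Illustrative sketch ('site' and 'new_value' below are hypothetical): one
// call patches both the instruction stream and, inside an nmethod, the
// matching oop/metadata relocation cell:
//
//   nativeMovConstReg_at(site)->set_data((intptr_t)new_value);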
void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
address addr = addr_at(0);
CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
if (MacroAssembler::get_narrow_oop(addr, cb->content_begin()) == (long)data) return;
const int invalidated_range =
MacroAssembler::patch_set_narrow_oop(addr, cb->content_begin(), (long)data);
const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
// FIXME:
const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
ICache::ppc64_flush_icache_bytes(start, range);
}
// Do not use an assertion here. Let clients decide whether they only
// want this when assertions are enabled.
#ifdef ASSERT
void NativeMovConstReg::verify() {
address addr = addr_at(0);
if (! MacroAssembler::is_load_const_at(addr) &&
! MacroAssembler::is_load_const_from_method_toc_at(addr)) {
CodeBlob* cb = CodeCache::find_blob_unsafe(addr); // find_nmethod() asserts if nmethod is zombie.
if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
! MacroAssembler::is_bl(*((int*) addr))) {
tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr);
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr));
}
}
}
#endif // ASSERT
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
ResourceMark rm;
int code_size = 1 * BytesPerInstWord;
CodeBuffer cb(verified_entry, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
#ifdef COMPILER2
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
#endif
// Patch this nmethod atomically. Always use illtrap/trap in debug build.
if (DEBUG_ONLY(false &&) a->is_within_range_of_b(dest, a->pc())) {
a->b(dest);
} else {
// The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
if (TrapBasedNotEntrantChecks) {
// We use a special trap for marking a method as not_entrant or zombie.
a->trap_zombie_not_entrant();
} else {
// We use an illtrap for marking a method as not_entrant or zombie.
a->illtrap();
}
}
ICache::ppc64_flush_icache_bytes(verified_entry, code_size);
}
#ifdef ASSERT
void NativeJump::verify() {
address addr = addr_at(0);
NativeInstruction::verify();
if (!NativeJump::is_jump_at(addr)) {
tty->print_cr("not a NativeJump at " PTR_FORMAT, addr);
// TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr));
}
}
#endif // ASSERT
//-------------------------------------------------------------------
// Call trampoline stubs.
//
// Layout and instructions of a call trampoline stub:
// 0: load the TOC (part 1)
// 4: load the TOC (part 2)
// 8: load the call target from the constant pool (part 1)
// [12: load the call target from the constant pool (part 2, optional)]
// ..: branch via CTR
//
address NativeCallTrampolineStub::encoded_destination_addr() const {
address instruction_addr = addr_at(2 * BytesPerInstWord);
assert(MacroAssembler::is_ld_largeoffset(instruction_addr),
"must be a ld with large offset (from the constant pool)");
return instruction_addr;
}
address NativeCallTrampolineStub::destination(nmethod *nm) const {
CodeBlob* cb = nm ? nm : CodeCache::find_blob_unsafe(addr_at(0));
address ctable = cb->content_begin();
return *(address*)(ctable + destination_toc_offset());
}
int NativeCallTrampolineStub::destination_toc_offset() const {
return MacroAssembler::get_ld_largeoffset_offset(encoded_destination_addr());
}
void NativeCallTrampolineStub::set_destination(address new_destination) {
CodeBlob* cb = CodeCache::find_blob(addr_at(0));
address ctable = cb->content_begin();
*(address*)(ctable + destination_toc_offset()) = new_destination;
}
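// Illustrative sketch of re-binding a call through its trampoline stub;
// 'call_pc' and 'new_target' are hypothetical. This mirrors the slow path
// of NativeCall::set_destination_mt_safe() above:
//
//   NativeCall* call = nativeCall_at(call_pc);
//   address stub = call->get_trampoline();
//   if (stub != NULL) {
//     NativeCallTrampolineStub_at(stub)->set_destination(new_target);
//   }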

View File

@ -0,0 +1,398 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
#define CPU_PPC_VM_NATIVEINST_PPC_HPP
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"
// We have interfaces for the following instructions:
//
// - NativeInstruction
// - NativeCall
// - NativeFarCall
// - NativeMovConstReg
// - NativeJump
// - NativeIllegalInstruction
// - NativeConditionalFarBranch
// - NativeCallTrampolineStub
// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
friend class Relocation;
public:
bool is_sigtrap_ic_miss_check() {
assert(UseSIGTRAP, "precondition");
return MacroAssembler::is_trap_ic_miss_check(long_at(0));
}
bool is_sigtrap_null_check() {
assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
return MacroAssembler::is_trap_null_check(long_at(0));
}
// We use a special trap for marking a method as not_entrant or zombie
// iff UseSIGTRAP.
bool is_sigtrap_zombie_not_entrant() {
assert(UseSIGTRAP, "precondition");
return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
}
// We use an illtrap for marking a method as not_entrant or zombie
// iff !UseSIGTRAP.
bool is_sigill_zombie_not_entrant() {
assert(!UseSIGTRAP, "precondition");
// Work around a C++ compiler bug which changes 'this'.
return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
}
static bool is_sigill_zombie_not_entrant_at(address addr);
#ifdef COMPILER2
// SIGTRAP-based implicit range checks
bool is_sigtrap_range_check() {
assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
return MacroAssembler::is_trap_range_check(long_at(0));
}
#endif
// 'should not reach here'.
bool is_sigtrap_should_not_reach_here() {
return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
}
bool is_safepoint_poll() {
// Is the current instruction a POTENTIAL read access to the polling page?
// The current arguments of the instruction are not checked!
return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
}
bool is_memory_serialization(JavaThread *thread, void *ucontext) {
// Is the current instruction a write access of thread to the
// memory serialization page?
return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
}
address get_stack_bang_address(void *ucontext) {
// If long_at(0) is not a stack bang, return 0. Otherwise, return
// banged address.
return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
}
protected:
address addr_at(int offset) const { return address(this) + offset; }
int long_at(int offset) const { return *(int*)addr_at(offset); }
public:
void verify() NOT_DEBUG_RETURN;
};
inline NativeInstruction* nativeInstruction_at(address address) {
NativeInstruction* inst = (NativeInstruction*)address;
inst->verify();
return inst;
}
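// Illustrative sketch of the intended use, e.g. from a signal handler
// ('pc' is a hypothetical faulting address):
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_safepoint_poll()) {
//     // faulted on the polling page; divert to the safepoint handler
//   }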
// The NativeCall is an abstraction for accessing/manipulating call
// instructions. It is used to manipulate inline caches, primitive &
// dll calls, etc.
//
// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
// at present, we provide a single class `NativeCall' representing the
// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
// mtctr, bctrl'.
class NativeCall: public NativeInstruction {
public:
enum ppc_specific_constants {
load_const_instruction_size = 28,
load_const_from_method_toc_instruction_size = 16,
instruction_size = 16 // Used in shared code for calls with reloc_info.
};
static bool is_call_at(address a) {
return Assembler::is_bl(*(int*)(a));
}
static bool is_call_before(address return_address) {
return NativeCall::is_call_at(return_address - 4);
}
address instruction_address() const {
return addr_at(0);
}
address next_instruction_address() const {
// We have only bl.
assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
return addr_at(4);
}
address return_address() const {
return next_instruction_address();
}
address destination() const;
// The parameter assert_lock disables the assertion during code generation.
void set_destination_mt_safe(address dest, bool assert_lock = true);
address get_trampoline();
void verify_alignment() {} // do nothing on ppc
void verify() NOT_DEBUG_RETURN;
};
inline NativeCall* nativeCall_at(address instr) {
NativeCall* call = (NativeCall*)instr;
call->verify();
return call;
}
inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = NULL;
  if (MacroAssembler::is_bl(*(int*)(return_address - 4))) {
    call = (NativeCall*)(return_address - 4);
    call->verify(); // only verify when a bl was actually found; 'call' may be NULL
  }
  return call;
}
// The NativeFarCall is an abstraction for accessing/manipulating native
// call-anywhere instructions.
// Used to call native methods which may be loaded anywhere in the address
// space, possibly out of reach of a call instruction.
class NativeFarCall: public NativeInstruction {
public:
// We use MacroAssembler::bl64_patchable() for implementing a
// call-anywhere instruction.
// Checks whether instr points at a NativeFarCall instruction.
static bool is_far_call_at(address instr) {
return MacroAssembler::is_bl64_patchable_at(instr);
}
// Does the NativeFarCall implementation use a pc-relative encoding
// of the call destination?
// Used when relocating code.
bool is_pcrelative() {
assert(MacroAssembler::is_bl64_patchable_at((address)this),
"unexpected call type");
return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
}
// Returns the NativeFarCall's destination.
address destination() const {
assert(MacroAssembler::is_bl64_patchable_at((address)this),
"unexpected call type");
return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
}
// Sets the NativeFarCall's destination, not necessarily mt-safe.
// Used when relocating code.
void set_destination(address dest) {
// Set new destination (implementation of call may change here).
assert(MacroAssembler::is_bl64_patchable_at((address)this),
"unexpected call type");
MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
}
void verify() NOT_DEBUG_RETURN;
};
// Instantiates a NativeFarCall object starting at the given instruction
// address and returns the NativeFarCall object.
inline NativeFarCall* nativeFarCall_at(address instr) {
NativeFarCall* call = (NativeFarCall*)instr;
call->verify();
return call;
}
// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
class NativeMovConstReg: public NativeInstruction {
public:
enum ppc_specific_constants {
load_const_instruction_size = 20,
load_const_from_method_toc_instruction_size = 8,
instruction_size = 8 // Used in shared code for calls with reloc_info.
};
address instruction_address() const {
return addr_at(0);
}
address next_instruction_address() const;
// (The [set_]data accessor respects oop_type relocs also.)
intptr_t data() const;
// Patch the code stream.
address set_data_plain(intptr_t x, CodeBlob *code);
// Patch the code stream and oop pool.
void set_data(intptr_t x);
// Patch narrow oop constants. Use this also for narrow klass.
void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);
void verify() NOT_DEBUG_RETURN;
};
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
NativeMovConstReg* test = (NativeMovConstReg*)address;
test->verify();
return test;
}
// The NativeJump is an abstraction for accessing/manipulating native
// jump-anywhere instructions.
class NativeJump: public NativeInstruction {
public:
// We use MacroAssembler::b64_patchable() for implementing a
// jump-anywhere instruction.
enum ppc_specific_constants {
instruction_size = MacroAssembler::b64_patchable_size
};
// Checks whether instr points at a NativeJump instruction.
static bool is_jump_at(address instr) {
return MacroAssembler::is_b64_patchable_at(instr)
|| ( MacroAssembler::is_load_const_from_method_toc_at(instr)
&& Assembler::is_mtctr(*(int*)(instr + 2 * 4))
&& Assembler::is_bctr(*(int*)(instr + 3 * 4)));
}
// Does the NativeJump implementation use a pc-relative encoding
// of the call destination?
// Used when relocating code or patching jumps.
bool is_pcrelative() {
return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
}
// Returns the NativeJump's destination.
address jump_destination() const {
if (MacroAssembler::is_b64_patchable_at((address)this)) {
return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
} else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
&& Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
&& Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
return (address)((NativeMovConstReg *)this)->data();
} else {
ShouldNotReachHere();
return NULL;
}
}
// Sets the NativeJump's destination, not necessarily mt-safe.
// Used when relocating code or patching jumps.
void set_jump_destination(address dest) {
// Set new destination (implementation of call may change here).
if (MacroAssembler::is_b64_patchable_at((address)this)) {
MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
} else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
&& Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
&& Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
((NativeMovConstReg *)this)->set_data((intptr_t)dest);
} else {
ShouldNotReachHere();
}
}
// MT-safe insertion of native jump at verified method entry
static void patch_verified_entry(address entry, address verified_entry, address dest);
void verify() NOT_DEBUG_RETURN;
static void check_verified_entry_alignment(address entry, address verified_entry) {
// We just patch one instruction on ppc64, so the jump doesn't have to
// be aligned. Nothing to do here.
}
};
// Instantiates a NativeJump object starting at the given instruction
// address and returns the NativeJump object.
inline NativeJump* nativeJump_at(address instr) {
NativeJump* call = (NativeJump*)instr;
call->verify();
return call;
}
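// Illustrative sketch ('jump_pc' and 'new_dest' are hypothetical): both
// encodings of a NativeJump are read and patched through the same accessors:
//
//   NativeJump* jmp = nativeJump_at(jump_pc);
//   address old_dest = jmp->jump_destination();
//   jmp->set_jump_destination(new_dest); // not necessarily mt-safe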
// NativeConditionalFarBranch is an abstraction for accessing/manipulating
// conditional far branches.
class NativeConditionalFarBranch : public NativeInstruction {
public:
static bool is_conditional_far_branch_at(address instr) {
return MacroAssembler::is_bc_far_at(instr);
}
address branch_destination() const {
return MacroAssembler::get_dest_of_bc_far_at((address)this);
}
void set_branch_destination(address dest) {
MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
}
};
inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
"must be a conditional far branch");
return (NativeConditionalFarBranch*)address;
}
// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
private:
address encoded_destination_addr() const;
public:
address destination(nmethod *nm = NULL) const;
int destination_toc_offset() const;
void set_destination(address new_destination);
};
inline bool is_NativeCallTrampolineStub_at(address address) {
int first_instr = *(int*)address;
return Assembler::is_addis(first_instr) &&
(Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2;
}
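// Illustrative note: the predicate above keys off the stub layout described
// in nativeInst_ppc.cpp -- every trampoline stub starts with an addis whose
// target register is R12_scratch2, so peeking at one instruction suffices:
//
//   if (is_NativeCallTrampolineStub_at(dest)) {
//     address target = NativeCallTrampolineStub_at(dest)->destination();
//   }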
inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
return (NativeCallTrampolineStub*)address;
}
#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP

12054 hotspot/src/cpu/ppc/vm/ppc.ad Normal file

File diff suppressed because it is too large.

View File

@ -0,0 +1,24 @@
//
// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2013 SAP AG. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_REGISTERMAP_PPC_HPP
#define CPU_PPC_VM_REGISTERMAP_PPC_HPP
// machine-dependent implementation for register maps
friend class frame;
private:
// This is the hook for finding a register in a "well-known" location,
// such as a register block of a predetermined format.
// Since there is none, we just return NULL.
// See registerMap_sparc.hpp for an example of grabbing registers
// from register save areas of a standard layout.
address pd_location(VMReg reg) const { return NULL; }
// no PD state to clear or copy:
void pd_clear() {}
void pd_initialize() {}
void pd_initialize_from(const RegisterMap* map) {}
#endif // CPU_PPC_VM_REGISTERMAP_PPC_HPP

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// make sure the defines don't screw up the declarations later on in this file
#define DONT_USE_REGISTER_DEFINES
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/register.hpp"
#include "register_ppc.hpp"
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "interp_masm_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "interp_masm_ppc_64.hpp"
#endif
REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(FloatRegister, fnoreg);

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "register_ppc.hpp"
const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * 2;
const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
FloatRegisterImpl::number_of_registers * 2;
const int ConcreteRegisterImpl::max_cnd = ConcreteRegisterImpl::max_fpr +
ConditionRegisterImpl::number_of_registers;
const char* RegisterImpl::name() const {
const char* names[number_of_registers] = {
"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
"R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
"R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
"R24", "R25", "R26", "R27", "R28", "R29", "R30", "R31"
};
return is_valid() ? names[encoding()] : "noreg";
}
const char* ConditionRegisterImpl::name() const {
const char* names[number_of_registers] = {
"CR0", "CR1", "CR2", "CR3", "CR4", "CR5", "CR6", "CR7"
};
return is_valid() ? names[encoding()] : "cnoreg";
}
const char* FloatRegisterImpl::name() const {
const char* names[number_of_registers] = {
"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
"F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
"F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
"F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31"
};
return is_valid() ? names[encoding()] : "fnoreg";
}
const char* SpecialRegisterImpl::name() const {
const char* names[number_of_registers] = {
"SR_XER", "SR_LR", "SR_CTR", "SR_VRSAVE", "SR_SPEFSCR", "SR_PPR"
};
return is_valid() ? names[encoding()] : "snoreg";
}
const char* VectorRegisterImpl::name() const {
const char* names[number_of_registers] = {
"VR0", "VR1", "VR2", "VR3", "VR4", "VR5", "VR6", "VR7",
"VR8", "VR9", "VR10", "VR11", "VR12", "VR13", "VR14", "VR15",
"VR16", "VR17", "VR18", "VR19", "VR20", "VR21", "VR22", "VR23",
"VR24", "VR25", "VR26", "VR27", "VR28", "VR29", "VR30", "VR31"
};
return is_valid() ? names[encoding()] : "vnoreg";
}

View File

@ -0,0 +1,633 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_REGISTER_PPC_HPP
#define CPU_PPC_VM_REGISTER_PPC_HPP
#include "asm/register.hpp"
#include "vm_version_ppc.hpp"
// forward declaration
class Address;
class VMRegImpl;
typedef VMRegImpl* VMReg;
// PPC64 registers
//
// See "64-bit PowerPC ELF ABI Supplement 1.7", IBM Corp. (2003-10-29).
// (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf)
//
// r0 Register used in function prologs (volatile)
// r1 Stack pointer (nonvolatile)
// r2 TOC pointer (volatile)
// r3 Parameter and return value (volatile)
// r4-r10 Function parameters (volatile)
// r11 Register used in calls by pointer and as an environment pointer for languages which require one (volatile)
// r12 Register used for exception handling and glink code (volatile)
// r13 Reserved for use as system thread ID
// r14-r31 Local variables (nonvolatile)
//
// f0 Scratch register (volatile)
// f1-f4 Floating point parameters and return value (volatile)
// f5-f13 Floating point parameters (volatile)
// f14-f31 Floating point values (nonvolatile)
//
// LR Link register for return address (volatile)
// CTR Loop counter (volatile)
// XER Fixed point exception register (volatile)
// FPSCR Floating point status and control register (volatile)
//
// CR0-CR1 Condition code fields (volatile)
// CR2-CR4 Condition code fields (nonvolatile)
// CR5-CR7 Condition code fields (volatile)
//
// ----------------------------------------------
// On processors with the VMX feature:
// v0-v1 Volatile scratch registers
// v2-v13 Volatile vector parameters registers
// v14-v19 Volatile scratch registers
// v20-v31 Non-volatile registers
// vrsave Non-volatile 32-bit register
// Use Register as shortcut
class RegisterImpl;
typedef RegisterImpl* Register;
inline Register as_Register(int encoding) {
assert(encoding >= 0 && encoding < 32, "bad register encoding");
return (Register)(intptr_t)encoding;
}
// The implementation of integer registers for the Power architecture
class RegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
};
// general construction
inline friend Register as_Register(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
VMReg as_VMReg();
Register successor() const { return as_Register(encoding() + 1); }
// testers
bool is_valid() const { return ( 0 <= (value()&0x7F) && (value()&0x7F) < number_of_registers); }
bool is_volatile() const { return ( 0 <= (value()&0x7F) && (value()&0x7F) <= 13 ); }
bool is_nonvolatile() const { return (14 <= (value()&0x7F) && (value()&0x7F) <= 31 ); }
const char* name() const;
};
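// Illustrative sketch of the accessors above ('r' is hypothetical):
//
//   Register r = as_Register(3);      // R3
//   assert(r->is_volatile(), "R3 is a parameter register, hence volatile");
//   Register next = r->successor();   // R4, encoding() == 4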
// The integer registers of the PPC architecture
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
CONSTANT_REGISTER_DECLARATION(Register, R0, (0));
CONSTANT_REGISTER_DECLARATION(Register, R1, (1));
CONSTANT_REGISTER_DECLARATION(Register, R2, (2));
CONSTANT_REGISTER_DECLARATION(Register, R3, (3));
CONSTANT_REGISTER_DECLARATION(Register, R4, (4));
CONSTANT_REGISTER_DECLARATION(Register, R5, (5));
CONSTANT_REGISTER_DECLARATION(Register, R6, (6));
CONSTANT_REGISTER_DECLARATION(Register, R7, (7));
CONSTANT_REGISTER_DECLARATION(Register, R8, (8));
CONSTANT_REGISTER_DECLARATION(Register, R9, (9));
CONSTANT_REGISTER_DECLARATION(Register, R10, (10));
CONSTANT_REGISTER_DECLARATION(Register, R11, (11));
CONSTANT_REGISTER_DECLARATION(Register, R12, (12));
CONSTANT_REGISTER_DECLARATION(Register, R13, (13));
CONSTANT_REGISTER_DECLARATION(Register, R14, (14));
CONSTANT_REGISTER_DECLARATION(Register, R15, (15));
CONSTANT_REGISTER_DECLARATION(Register, R16, (16));
CONSTANT_REGISTER_DECLARATION(Register, R17, (17));
CONSTANT_REGISTER_DECLARATION(Register, R18, (18));
CONSTANT_REGISTER_DECLARATION(Register, R19, (19));
CONSTANT_REGISTER_DECLARATION(Register, R20, (20));
CONSTANT_REGISTER_DECLARATION(Register, R21, (21));
CONSTANT_REGISTER_DECLARATION(Register, R22, (22));
CONSTANT_REGISTER_DECLARATION(Register, R23, (23));
CONSTANT_REGISTER_DECLARATION(Register, R24, (24));
CONSTANT_REGISTER_DECLARATION(Register, R25, (25));
CONSTANT_REGISTER_DECLARATION(Register, R26, (26));
CONSTANT_REGISTER_DECLARATION(Register, R27, (27));
CONSTANT_REGISTER_DECLARATION(Register, R28, (28));
CONSTANT_REGISTER_DECLARATION(Register, R29, (29));
CONSTANT_REGISTER_DECLARATION(Register, R30, (30));
CONSTANT_REGISTER_DECLARATION(Register, R31, (31));
//
// Because Power has many registers, #define'ing values for them is
// beneficial in code size and is worth the cost of some of the
// dangers of defines. If a particular file has a problem with these
// defines then it's possible to turn them off in that file by
// defining DONT_USE_REGISTER_DEFINES. Register_definitions_ppc.cpp
// does that so that it's able to provide real definitions of these
// registers for use in debuggers and such.
//
#ifndef DONT_USE_REGISTER_DEFINES
#define noreg ((Register)(noreg_RegisterEnumValue))
#define R0 ((Register)(R0_RegisterEnumValue))
#define R1 ((Register)(R1_RegisterEnumValue))
#define R2 ((Register)(R2_RegisterEnumValue))
#define R3 ((Register)(R3_RegisterEnumValue))
#define R4 ((Register)(R4_RegisterEnumValue))
#define R5 ((Register)(R5_RegisterEnumValue))
#define R6 ((Register)(R6_RegisterEnumValue))
#define R7 ((Register)(R7_RegisterEnumValue))
#define R8 ((Register)(R8_RegisterEnumValue))
#define R9 ((Register)(R9_RegisterEnumValue))
#define R10 ((Register)(R10_RegisterEnumValue))
#define R11 ((Register)(R11_RegisterEnumValue))
#define R12 ((Register)(R12_RegisterEnumValue))
#define R13 ((Register)(R13_RegisterEnumValue))
#define R14 ((Register)(R14_RegisterEnumValue))
#define R15 ((Register)(R15_RegisterEnumValue))
#define R16 ((Register)(R16_RegisterEnumValue))
#define R17 ((Register)(R17_RegisterEnumValue))
#define R18 ((Register)(R18_RegisterEnumValue))
#define R19 ((Register)(R19_RegisterEnumValue))
#define R20 ((Register)(R20_RegisterEnumValue))
#define R21 ((Register)(R21_RegisterEnumValue))
#define R22 ((Register)(R22_RegisterEnumValue))
#define R23 ((Register)(R23_RegisterEnumValue))
#define R24 ((Register)(R24_RegisterEnumValue))
#define R25 ((Register)(R25_RegisterEnumValue))
#define R26 ((Register)(R26_RegisterEnumValue))
#define R27 ((Register)(R27_RegisterEnumValue))
#define R28 ((Register)(R28_RegisterEnumValue))
#define R29 ((Register)(R29_RegisterEnumValue))
#define R30 ((Register)(R30_RegisterEnumValue))
#define R31 ((Register)(R31_RegisterEnumValue))
#endif
// Use ConditionRegister as shortcut
class ConditionRegisterImpl;
typedef ConditionRegisterImpl* ConditionRegister;
inline ConditionRegister as_ConditionRegister(int encoding) {
assert(encoding >= 0 && encoding < 8, "bad condition register encoding");
return (ConditionRegister)(intptr_t)encoding;
}
// The implementation of condition register(s) for the PPC architecture
class ConditionRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 8
};
// construction.
inline friend ConditionRegister as_ConditionRegister(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
VMReg as_VMReg();
// testers
bool is_valid() const { return (0 <= value() && value() < number_of_registers); }
bool is_nonvolatile() const { return (2 <= (value()&0x7F) && (value()&0x7F) <= 4 ); }
const char* name() const;
};
// The (parts of the) condition register(s) of the PPC architecture
// sys/ioctl.h on AIX defines CR0-CR3, so I name these CCR.
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR0, (0));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR1, (1));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR2, (2));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR3, (3));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR4, (4));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR5, (5));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR6, (6));
CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR7, (7));
#ifndef DONT_USE_REGISTER_DEFINES
#define CCR0 ((ConditionRegister)(CCR0_ConditionRegisterEnumValue))
#define CCR1 ((ConditionRegister)(CCR1_ConditionRegisterEnumValue))
#define CCR2 ((ConditionRegister)(CCR2_ConditionRegisterEnumValue))
#define CCR3 ((ConditionRegister)(CCR3_ConditionRegisterEnumValue))
#define CCR4 ((ConditionRegister)(CCR4_ConditionRegisterEnumValue))
#define CCR5 ((ConditionRegister)(CCR5_ConditionRegisterEnumValue))
#define CCR6 ((ConditionRegister)(CCR6_ConditionRegisterEnumValue))
#define CCR7 ((ConditionRegister)(CCR7_ConditionRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Use FloatRegister as shortcut
class FloatRegisterImpl;
typedef FloatRegisterImpl* FloatRegister;
inline FloatRegister as_FloatRegister(int encoding) {
assert(encoding >= 0 && encoding < 32, "bad float register encoding");
return (FloatRegister)(intptr_t)encoding;
}
// The implementation of float registers for the PPC architecture
class FloatRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
};
// construction
inline friend FloatRegister as_FloatRegister(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
VMReg as_VMReg();
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
// testers
bool is_valid() const { return (0 <= value() && value() < number_of_registers); }
const char* name() const;
};
// The float registers of the PPC architecture
CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F0, ( 0));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F1, ( 1));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F2, ( 2));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F3, ( 3));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F4, ( 4));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F5, ( 5));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F6, ( 6));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F7, ( 7));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F8, ( 8));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F9, ( 9));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F10, (10));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F11, (11));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F12, (12));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F13, (13));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F14, (14));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F15, (15));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F16, (16));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F17, (17));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F18, (18));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F19, (19));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F20, (20));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F21, (21));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F22, (22));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F23, (23));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F24, (24));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F25, (25));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F26, (26));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F27, (27));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F28, (28));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F29, (29));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F30, (30));
CONSTANT_REGISTER_DECLARATION(FloatRegister, F31, (31));
#ifndef DONT_USE_REGISTER_DEFINES
#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
#define F0 ((FloatRegister)( F0_FloatRegisterEnumValue))
#define F1 ((FloatRegister)( F1_FloatRegisterEnumValue))
#define F2 ((FloatRegister)( F2_FloatRegisterEnumValue))
#define F3 ((FloatRegister)( F3_FloatRegisterEnumValue))
#define F4 ((FloatRegister)( F4_FloatRegisterEnumValue))
#define F5 ((FloatRegister)( F5_FloatRegisterEnumValue))
#define F6 ((FloatRegister)( F6_FloatRegisterEnumValue))
#define F7 ((FloatRegister)( F7_FloatRegisterEnumValue))
#define F8 ((FloatRegister)( F8_FloatRegisterEnumValue))
#define F9 ((FloatRegister)( F9_FloatRegisterEnumValue))
#define F10 ((FloatRegister)( F10_FloatRegisterEnumValue))
#define F11 ((FloatRegister)( F11_FloatRegisterEnumValue))
#define F12 ((FloatRegister)( F12_FloatRegisterEnumValue))
#define F13 ((FloatRegister)( F13_FloatRegisterEnumValue))
#define F14 ((FloatRegister)( F14_FloatRegisterEnumValue))
#define F15 ((FloatRegister)( F15_FloatRegisterEnumValue))
#define F16 ((FloatRegister)( F16_FloatRegisterEnumValue))
#define F17 ((FloatRegister)( F17_FloatRegisterEnumValue))
#define F18 ((FloatRegister)( F18_FloatRegisterEnumValue))
#define F19 ((FloatRegister)( F19_FloatRegisterEnumValue))
#define F20 ((FloatRegister)( F20_FloatRegisterEnumValue))
#define F21 ((FloatRegister)( F21_FloatRegisterEnumValue))
#define F22 ((FloatRegister)( F22_FloatRegisterEnumValue))
#define F23 ((FloatRegister)( F23_FloatRegisterEnumValue))
#define F24 ((FloatRegister)( F24_FloatRegisterEnumValue))
#define F25 ((FloatRegister)( F25_FloatRegisterEnumValue))
#define F26 ((FloatRegister)( F26_FloatRegisterEnumValue))
#define F27 ((FloatRegister)( F27_FloatRegisterEnumValue))
#define F28 ((FloatRegister)( F28_FloatRegisterEnumValue))
#define F29 ((FloatRegister)( F29_FloatRegisterEnumValue))
#define F30 ((FloatRegister)( F30_FloatRegisterEnumValue))
#define F31 ((FloatRegister)( F31_FloatRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
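// As a small usage sketch of the accessors above (assuming a HotSpot context
// where tty is available), the float register file can be walked by encoding
// via successor():
//
//   for (FloatRegister f = F0; f->is_valid(); f = f->successor()) {
//     // Prints encodings 0..31 together with each register's name.
//     tty->print_cr("%2d -> %s", f->encoding(), f->name());
//   }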
// Use SpecialRegister as shortcut
class SpecialRegisterImpl;
typedef SpecialRegisterImpl* SpecialRegister;
inline SpecialRegister as_SpecialRegister(int encoding) {
return (SpecialRegister)(intptr_t)encoding;
}
// The implementation of special registers for the Power architecture (LR, CTR and friends)
class SpecialRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 6
};
// construction
inline friend SpecialRegister as_SpecialRegister(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
VMReg as_VMReg();
// testers
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
const char* name() const;
};
// The special registers of the PPC architecture
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_XER, (0));
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_LR, (1));
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_CTR, (2));
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_VRSAVE, (3));
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_SPEFSCR, (4));
CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_PPR, (5));
#ifndef DONT_USE_REGISTER_DEFINES
#define SR_XER ((SpecialRegister)(SR_XER_SpecialRegisterEnumValue))
#define SR_LR ((SpecialRegister)(SR_LR_SpecialRegisterEnumValue))
#define SR_CTR ((SpecialRegister)(SR_CTR_SpecialRegisterEnumValue))
#define SR_VRSAVE ((SpecialRegister)(SR_VRSAVE_SpecialRegisterEnumValue))
#define SR_SPEFSCR ((SpecialRegister)(SR_SPEFSCR_SpecialRegisterEnumValue))
#define SR_PPR ((SpecialRegister)(SR_PPR_SpecialRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Use VectorRegister as shortcut
class VectorRegisterImpl;
typedef VectorRegisterImpl* VectorRegister;
inline VectorRegister as_VectorRegister(int encoding) {
return (VectorRegister)(intptr_t)encoding;
}
// The implementation of vector registers for the Power architecture
class VectorRegisterImpl: public AbstractRegisterImpl {
public:
enum {
number_of_registers = 32
};
// construction
inline friend VectorRegister as_VectorRegister(int encoding);
// accessors
int encoding() const { assert(is_valid(), "invalid register"); return value(); }
// testers
bool is_valid() const { return 0 <= value() && value() < number_of_registers; }
const char* name() const;
};
// The Vector registers of the Power architecture
CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg, (-1));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR0, ( 0));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR1, ( 1));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR2, ( 2));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR3, ( 3));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR4, ( 4));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR5, ( 5));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR6, ( 6));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR7, ( 7));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR8, ( 8));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR9, ( 9));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR10, (10));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR11, (11));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR12, (12));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR13, (13));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR14, (14));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR15, (15));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR16, (16));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR17, (17));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR18, (18));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR19, (19));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR20, (20));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR21, (21));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR22, (22));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR23, (23));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR24, (24));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR25, (25));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR26, (26));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR27, (27));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR28, (28));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR29, (29));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR30, (30));
CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
#ifndef DONT_USE_REGISTER_DEFINES
#define vnoreg ((VectorRegister)(vnoreg_VectorRegisterEnumValue))
#define VR0 ((VectorRegister)( VR0_VectorRegisterEnumValue))
#define VR1 ((VectorRegister)( VR1_VectorRegisterEnumValue))
#define VR2 ((VectorRegister)( VR2_VectorRegisterEnumValue))
#define VR3 ((VectorRegister)( VR3_VectorRegisterEnumValue))
#define VR4 ((VectorRegister)( VR4_VectorRegisterEnumValue))
#define VR5 ((VectorRegister)( VR5_VectorRegisterEnumValue))
#define VR6 ((VectorRegister)( VR6_VectorRegisterEnumValue))
#define VR7 ((VectorRegister)( VR7_VectorRegisterEnumValue))
#define VR8 ((VectorRegister)( VR8_VectorRegisterEnumValue))
#define VR9 ((VectorRegister)( VR9_VectorRegisterEnumValue))
#define VR10 ((VectorRegister)( VR10_VectorRegisterEnumValue))
#define VR11 ((VectorRegister)( VR11_VectorRegisterEnumValue))
#define VR12 ((VectorRegister)( VR12_VectorRegisterEnumValue))
#define VR13 ((VectorRegister)( VR13_VectorRegisterEnumValue))
#define VR14 ((VectorRegister)( VR14_VectorRegisterEnumValue))
#define VR15 ((VectorRegister)( VR15_VectorRegisterEnumValue))
#define VR16 ((VectorRegister)( VR16_VectorRegisterEnumValue))
#define VR17 ((VectorRegister)( VR17_VectorRegisterEnumValue))
#define VR18 ((VectorRegister)( VR18_VectorRegisterEnumValue))
#define VR19 ((VectorRegister)( VR19_VectorRegisterEnumValue))
#define VR20 ((VectorRegister)( VR20_VectorRegisterEnumValue))
#define VR21 ((VectorRegister)( VR21_VectorRegisterEnumValue))
#define VR22 ((VectorRegister)( VR22_VectorRegisterEnumValue))
#define VR23 ((VectorRegister)( VR23_VectorRegisterEnumValue))
#define VR24 ((VectorRegister)( VR24_VectorRegisterEnumValue))
#define VR25 ((VectorRegister)( VR25_VectorRegisterEnumValue))
#define VR26 ((VectorRegister)( VR26_VectorRegisterEnumValue))
#define VR27 ((VectorRegister)( VR27_VectorRegisterEnumValue))
#define VR28 ((VectorRegister)( VR28_VectorRegisterEnumValue))
#define VR29 ((VectorRegister)( VR29_VectorRegisterEnumValue))
#define VR30 ((VectorRegister)( VR30_VectorRegisterEnumValue))
#define VR31 ((VectorRegister)( VR31_VectorRegisterEnumValue))
#endif // DONT_USE_REGISTER_DEFINES
// Maximum number of incoming arguments that can be passed in integer registers.
const int PPC_ARGS_IN_REGS_NUM = 8;
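// Under the 64-bit PPC ELF ABI this constant says the first eight integer or
// pointer arguments travel in R3 through R10 (the R3_ARG1 .. R10_ARG8 aliases
// declared further down); later arguments spill to the caller's stack. A
// hedged sketch, assuming the as_Register helper declared earlier in this
// file (gpr_for_int_arg is a hypothetical helper, not part of this port):
//
//   // Returns the GPR carrying 0-based integer argument i, or noreg if
//   // the argument is passed on the caller's stack instead.
//   inline Register gpr_for_int_arg(int i) {
//     return (i < PPC_ARGS_IN_REGS_NUM) ? as_Register(3 + i) : noreg;
//   }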
// Need to know the total number of registers of all sorts for SharedInfo.
// Define a class that exports it.
class ConcreteRegisterImpl : public AbstractRegisterImpl {
public:
enum {
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives
its optoregs.
number_of_registers =
( RegisterImpl::number_of_registers +
FloatRegisterImpl::number_of_registers )
* 2 // register halves
+ ConditionRegisterImpl::number_of_registers // condition code registers
+ SpecialRegisterImpl::number_of_registers // special registers
+ VectorRegisterImpl::number_of_registers // vector registers
};
static const int max_gpr;
static const int max_fpr;
static const int max_cnd;
};
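// Spelling out the arithmetic in the enum above, with the 32 GPRs and 32 FPRs
// declared in this file: (32 + 32) * 2 register halves gives 128, plus 8
// condition, 6 special and 32 vector registers, i.e. 174 slots in total. A
// compile-time restatement of that reading (C++11 static_assert, shown only
// as a sketch; this era of HotSpot builds as C++98):
//
//   static_assert(ConcreteRegisterImpl::number_of_registers ==
//                 (32 + 32) * 2 + 8 + 6 + 32,  // == 174
//                 "register slot count used by SharedInfo");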
// Common register declarations used in assembler code.
REGISTER_DECLARATION(Register, R0_SCRATCH, R0); // volatile
REGISTER_DECLARATION(Register, R1_SP, R1); // non-volatile
REGISTER_DECLARATION(Register, R2_TOC, R2); // volatile
REGISTER_DECLARATION(Register, R3_RET, R3); // volatile
REGISTER_DECLARATION(Register, R3_ARG1, R3); // volatile
REGISTER_DECLARATION(Register, R4_ARG2, R4); // volatile
REGISTER_DECLARATION(Register, R5_ARG3, R5); // volatile
REGISTER_DECLARATION(Register, R6_ARG4, R6); // volatile
REGISTER_DECLARATION(Register, R7_ARG5, R7); // volatile
REGISTER_DECLARATION(Register, R8_ARG6, R8); // volatile
REGISTER_DECLARATION(Register, R9_ARG7, R9); // volatile
REGISTER_DECLARATION(Register, R10_ARG8, R10); // volatile
REGISTER_DECLARATION(FloatRegister, F0_SCRATCH, F0); // volatile
REGISTER_DECLARATION(FloatRegister, F1_RET, F1); // volatile
REGISTER_DECLARATION(FloatRegister, F1_ARG1, F1); // volatile
REGISTER_DECLARATION(FloatRegister, F2_ARG2, F2); // volatile
REGISTER_DECLARATION(FloatRegister, F3_ARG3, F3); // volatile
REGISTER_DECLARATION(FloatRegister, F4_ARG4, F4); // volatile
REGISTER_DECLARATION(FloatRegister, F5_ARG5, F5); // volatile
REGISTER_DECLARATION(FloatRegister, F6_ARG6, F6); // volatile
REGISTER_DECLARATION(FloatRegister, F7_ARG7, F7); // volatile
REGISTER_DECLARATION(FloatRegister, F8_ARG8, F8); // volatile
REGISTER_DECLARATION(FloatRegister, F9_ARG9, F9); // volatile
REGISTER_DECLARATION(FloatRegister, F10_ARG10, F10); // volatile
REGISTER_DECLARATION(FloatRegister, F11_ARG11, F11); // volatile
REGISTER_DECLARATION(FloatRegister, F12_ARG12, F12); // volatile
REGISTER_DECLARATION(FloatRegister, F13_ARG13, F13); // volatile
#ifndef DONT_USE_REGISTER_DEFINES
#define R0_SCRATCH AS_REGISTER(Register, R0)
#define R1_SP AS_REGISTER(Register, R1)
#define R2_TOC AS_REGISTER(Register, R2)
#define R3_RET AS_REGISTER(Register, R3)
#define R3_ARG1 AS_REGISTER(Register, R3)
#define R4_ARG2 AS_REGISTER(Register, R4)
#define R5_ARG3 AS_REGISTER(Register, R5)
#define R6_ARG4 AS_REGISTER(Register, R6)
#define R7_ARG5 AS_REGISTER(Register, R7)
#define R8_ARG6 AS_REGISTER(Register, R8)
#define R9_ARG7 AS_REGISTER(Register, R9)
#define R10_ARG8 AS_REGISTER(Register, R10)
#define F0_SCRATCH AS_REGISTER(FloatRegister, F0)
#define F1_RET AS_REGISTER(FloatRegister, F1)
#define F1_ARG1 AS_REGISTER(FloatRegister, F1)
#define F2_ARG2 AS_REGISTER(FloatRegister, F2)
#define F3_ARG3 AS_REGISTER(FloatRegister, F3)
#define F4_ARG4 AS_REGISTER(FloatRegister, F4)
#define F5_ARG5 AS_REGISTER(FloatRegister, F5)
#define F6_ARG6 AS_REGISTER(FloatRegister, F6)
#define F7_ARG7 AS_REGISTER(FloatRegister, F7)
#define F8_ARG8 AS_REGISTER(FloatRegister, F8)
#define F9_ARG9 AS_REGISTER(FloatRegister, F9)
#define F10_ARG10 AS_REGISTER(FloatRegister, F10)
#define F11_ARG11 AS_REGISTER(FloatRegister, F11)
#define F12_ARG12 AS_REGISTER(FloatRegister, F12)
#define F13_ARG13 AS_REGISTER(FloatRegister, F13)
#endif
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
REGISTER_DECLARATION(Register, R14_state, R14); // address of new cInterpreter
REGISTER_DECLARATION(Register, R15_prev_state, R15); // address of old cInterpreter
REGISTER_DECLARATION(Register, R16_thread, R16); // address of current thread
REGISTER_DECLARATION(Register, R17_tos, R17); // address of Java tos (prepushed)
REGISTER_DECLARATION(Register, R18_locals, R18); // address of first param slot (receiver)
REGISTER_DECLARATION(Register, R19_method, R19); // address of current method
#ifndef DONT_USE_REGISTER_DEFINES
#define R14_state AS_REGISTER(Register, R14)
#define R15_prev_state AS_REGISTER(Register, R15)
#define R16_thread AS_REGISTER(Register, R16)
#define R17_tos AS_REGISTER(Register, R17)
#define R18_locals AS_REGISTER(Register, R18)
#define R19_method AS_REGISTER(Register, R19)
#define R21_sender_SP AS_REGISTER(Register, R21)
#define R23_method_handle AS_REGISTER(Register, R23)
#endif
// Temporary registers to be used within frame manager. We can use
// the non-volatiles because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls.
REGISTER_DECLARATION(Register, R21_tmp1, R21);
REGISTER_DECLARATION(Register, R22_tmp2, R22);
REGISTER_DECLARATION(Register, R23_tmp3, R23);
REGISTER_DECLARATION(Register, R24_tmp4, R24);
REGISTER_DECLARATION(Register, R25_tmp5, R25);
REGISTER_DECLARATION(Register, R26_tmp6, R26);
REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29);
#ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1 AS_REGISTER(Register, R21)
#define R22_tmp2 AS_REGISTER(Register, R22)
#define R23_tmp3 AS_REGISTER(Register, R23)
#define R24_tmp4 AS_REGISTER(Register, R24)
#define R25_tmp5 AS_REGISTER(Register, R25)
#define R26_tmp6 AS_REGISTER(Register, R26)
#define R27_tmp7 AS_REGISTER(Register, R27)
#define R28_tmp8 AS_REGISTER(Register, R28)
#define R29_tmp9 AS_REGISTER(Register, R29)
#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif
// Scratch registers are volatile.
REGISTER_DECLARATION(Register, R11_scratch1, R11);
REGISTER_DECLARATION(Register, R12_scratch2, R12);
#ifndef DONT_USE_REGISTER_DEFINES
#define R11_scratch1 AS_REGISTER(Register, R11)
#define R12_scratch2 AS_REGISTER(Register, R12)
#endif
#endif // CPU_PPC_VM_REGISTER_PPC_HPP

View File

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "assembler_ppc.inline.hpp"
#include "code/relocInfo.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
bool copy_back_to_oop_pool = true; // TODO: PPC port
// The following comment is from the declaration of DataRelocation:
//
// "The "o" (displacement) argument is relevant only to split relocations
// on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns
// can encode more than 32 bits between them. This allows compilers to
// share set-hi instructions between addresses that differ by a small
// offset (e.g., different static variables in the same class).
// On such machines, the "x" argument to set_value on all set-lo
// instructions must be the same as the "x" argument for the
// corresponding set-hi instructions. The "o" arguments for the
// set-hi instructions are ignored, and must not affect the high-half
// immediate constant. The "o" arguments for the set-lo instructions are
// added into the low-half immediate constant, and must not overflow it."
//
// Currently we don't support splitting of relocations, so o must be
// zero:
assert(o == 0, "tried to split relocations");
if (!verify_only) {
if (format() != 1) {
nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x), code());
} else {
assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
"how to encode else?");
narrowOop no = (type() == relocInfo::oop_type) ?
oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
}
} else {
assert((address) (nativeMovConstReg_at(addr())->data()) == x, "data must match");
}
}
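// The quoted DataRelocation comment is easiest to see with concrete numbers.
// On a set-hi/set-lo machine such as SPARC, a 32-bit address splits roughly
// as below; the "o" displacement folds into the low half only, which is why
// one set-hi can be shared among nearby addresses (a hedged sketch of the
// arithmetic, not PPC code and not the actual relocation implementation):
//
//   #include <cassert>
//   #include <cstdint>
//   inline uint32_t hi22(uint32_t x) { return x >> 10; }
//   inline uint32_t lo10(uint32_t x, int32_t o) {
//     uint32_t lo = (x & 0x3ff) + o;  // "o" is added into the low half only
//     assert(lo <= 0x3ff && "displacement must not overflow the low half");
//     return lo;
//   }
//   // Two statics 8 bytes apart can then share one set-hi: hi22(x) is
//   // identical, while lo10(x, 0) and lo10(x, 8) differ by the offset.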
address Relocation::pd_call_destination(address orig_addr) {
intptr_t adj = 0;
address inst_loc = addr();
if (orig_addr != NULL) {
// We just moved this call instruction from orig_addr to addr().
// This means its target will appear to have grown by addr() - orig_addr.
adj = -(inst_loc - orig_addr);
}
if (NativeFarCall::is_far_call_at(inst_loc)) {
NativeFarCall* call = nativeFarCall_at(inst_loc);
return call->destination() + (intptr_t)(call->is_pcrelative() ? adj : 0);
} else if (NativeJump::is_jump_at(inst_loc)) {
NativeJump* jump = nativeJump_at(inst_loc);
return jump->jump_destination() + (intptr_t)(jump->is_pcrelative() ? adj : 0);
} else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
return branch->branch_destination();
} else {
// There are two instructions at the beginning of a stub, therefore we
// load at orig_addr + 8.
orig_addr = nativeCall_at(inst_loc)->get_trampoline();
if (orig_addr == NULL) {
return (address) -1;
} else {
return (address) nativeMovConstReg_at(orig_addr + 8)->data();
}
}
}
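// The "adj" correction above is easiest to check with numbers. Suppose a
// pc-relative call sat at orig_addr 0x1000 targeting 0x2000 and the code
// buffer moved it to addr() 0x1100: the encoded displacement still says
// "target = pc + 0x1000", so destination() now reads 0x2100, and
// adj = -(0x1100 - 0x1000) = -0x100 restores the original 0x2000. A
// standalone restatement of that arithmetic (hedged sketch only):
//
//   #include <cstdint>
//   // For a pc-relative branch moved from orig_pc to new_pc, the target
//   // read at the new location overshoots by exactly the move distance.
//   inline uintptr_t corrected_destination(uintptr_t read_dest,
//                                          uintptr_t new_pc,
//                                          uintptr_t orig_pc) {
//     return read_dest - (new_pc - orig_pc);  // 0x2100 - 0x100 == 0x2000
//   }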
void Relocation::pd_set_call_destination(address x) {
address inst_loc = addr();
if (NativeFarCall::is_far_call_at(inst_loc)) {
NativeFarCall* call = nativeFarCall_at(inst_loc);
call->set_destination(x);
} else if (NativeJump::is_jump_at(inst_loc)) {
NativeJump* jump = nativeJump_at(inst_loc);
jump->set_jump_destination(x);
} else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
branch->set_branch_destination(x);
} else {
NativeCall* call = nativeCall_at(inst_loc);
call->set_destination_mt_safe(x, false);
}
}
address* Relocation::pd_address_in_code() {
ShouldNotReachHere();
return 0;
}
address Relocation::pd_get_address_from_code() {
return (address)(nativeMovConstReg_at(addr())->data());
}
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
void metadata_Relocation::pd_fix_value(address x) {
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_PPC_VM_RELOCINFO_PPC_HPP
#define CPU_PPC_VM_RELOCINFO_PPC_HPP
// machine-dependent parts of class relocInfo
private:
enum {
// Since Power instructions are whole words,
// the two low-order offset bits can always be discarded.
offset_unit = 4,
// There is no need for format bits; the instructions are
// sufficiently self-identifying.
#ifndef _LP64
format_width = 0
#else
// Except narrow oops in 64-bits VM.
format_width = 1
#endif
};
#endif // CPU_PPC_VM_RELOCINFO_PPC_HPP
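// The offset_unit = 4 choice above means relocation offsets are stored in
// instruction words rather than bytes, so on word-aligned PPC code the two
// always-zero low bits cost nothing. A hedged sketch of the resulting
// pack/unpack (illustrative helpers, not relocInfo's actual layout):
//
//   #include <cassert>
//   // A byte offset such as 24 is stored as 24 / 4 == 6 and widened back
//   // on read; the assert mirrors the word-alignment guarantee above.
//   inline unsigned pack_offset(unsigned byte_offset) {
//     assert(byte_offset % 4 == 0 && "PPC instructions are word aligned");
//     return byte_offset / 4;
//   }
//   inline unsigned unpack_offset(unsigned stored) { return stored * 4; }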

View File

@@ -0,0 +1,183 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#ifdef COMPILER2
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "opto/runtime.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vmreg_ppc.inline.hpp"
#endif
#define __ masm->
#ifdef COMPILER2
// SP adjustment (must use unextended SP) for method handle call sites
// during exception handling.
static intptr_t adjust_SP_for_methodhandle_callsite(JavaThread *thread) {
RegisterMap map(thread, false);
// The frame constructor will do the correction for us (see frame::adjust_unextended_SP).
frame mh_caller_frame = thread->last_frame().sender(&map);
assert(mh_caller_frame.is_compiled_frame(), "Only may reach here for compiled MH call sites");
return (intptr_t) mh_caller_frame.unextended_sp();
}
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob at the end of the stub code.
// Compiled methods jump into this code when an exception occurs.
//
// Given an exception pc at a call, we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-save registers), unwind the frame, and jump to the
// exception handler of the nmethod if there is no Java-level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
// R3_ARG1: exception oop
// R4_ARG2: exception pc
//
// Results:
// R3_ARG1: exception oop
// R4_ARG2: exception pc in caller
// destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//
void OptoRuntime::generate_exception_blob() {
// Allocate space for the code.
ResourceMark rm;
// Setup code generation tools.
CodeBuffer buffer("exception_blob", 2048, 1024);
InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
address start = __ pc();
int frame_size_in_bytes = frame::abi_112_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
// Exception pc is 'return address' for stack walker.
__ std(R4_ARG2/*exception pc*/, _abi(lr), R1_SP);
// Store the exception in the Thread object.
__ std(R3_ARG1/*exception oop*/, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
__ std(R4_ARG2/*exception pc*/, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
// Save callee-saved registers.
// Push a C frame for the exception blob. It is needed for the C call later on.
__ push_frame_abi112(0, R11_scratch1);
// This call does all the hard work. It checks if an exception handler
// exists in the method.
// If so, it returns the handler address.
// If not, it prepares for stack-unwinding, restoring the callee-save
// registers of the frame being removed.
__ set_last_Java_frame(/*sp=*/R1_SP, noreg);
__ mr(R3_ARG1, R16_thread);
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
relocInfo::none);
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
__ asm_assert_ne("handle_exception_C must not return NULL", 0x601);
# endif
// Set an oopmap for the call site. This oopmap will only be used if we
// are unwinding the stack. Hence, all locations will be dead.
// Callee-saved registers will be the same as the frame above (i.e.,
// handle_exception_stub), since they were restored when we got the
// exception.
OopMapSet* oop_maps = new OopMapSet();
oop_maps->add_gc_map(calls_return_pc - start, map);
// Get unextended_sp for method handle call sites.
Label mh_callsite, mh_done; // Use a 2nd c call if it's a method handle call site.
__ lwa(R4_ARG2, in_bytes(JavaThread::is_method_handle_return_offset()), R16_thread);
__ cmpwi(CCR0, R4_ARG2, 0);
__ bne(CCR0, mh_callsite);
__ mtctr(R3_RET); // Move address of exception handler to SR_CTR.
__ reset_last_Java_frame();
__ pop_frame();
__ bind(mh_done);
// We have a handler in register SR_CTR (could be deopt blob).
// Get the exception oop.
__ ld(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
// Get the exception pc in case we are deoptimized.
__ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
// Reset thread values.
__ li(R0, 0);
#ifdef ASSERT
__ std(R0, in_bytes(JavaThread::exception_handler_pc_offset()), R16_thread);
__ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
#endif
// Clear the exception oop so GC no longer processes it as a root.
__ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
// Move exception pc into SR_LR.
__ mtlr(R4_ARG2);
__ bctr();
// Same as above, but also set sp to unextended_sp.
__ bind(mh_callsite);
__ mr(R31, R3_RET); // Save branch address.
__ mr(R3_ARG1, R16_thread);
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
// Returns unextended_sp in R3_RET.
__ mtctr(R31); // Move address of exception handler to SR_CTR.
__ reset_last_Java_frame();
__ mr(R1_SP, R3_RET); // Set sp to unextended_sp.
__ b(mh_done);
// Make sure all code is generated.
masm->flush();
// Set exception blob.
_exception_blob = ExceptionBlob::create(&buffer, oop_maps,
frame_size_in_bytes/wordSize);
}
#endif // COMPILER2

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff