Lana Steuck 2012-02-09 19:42:39 -08:00
commit 38d86a4aa3
256 changed files with 10185 additions and 3827 deletions

View File

@ -145,3 +145,5 @@ f0eccb2946986fb9626efde7d8ed9c8192623f5c jdk8-b17
b3a426170188f52981cf4573a2f14d487fddab0d jdk8-b21
e8f03541af27e38aafb619b96863e17f65ffe53b jdk8-b22
498124337041ad53cbaa7eb110f3d7acd6d4eac4 jdk8-b23
7d3720d8c595d1519c31e9ff7366203fc2c61350 jdk8-b24
0071a6d64113a35ba345bb1580c256de5ce17d3e jdk8-b25

View File

@ -145,3 +145,5 @@ a4f28069d44a379cda99dd1d921d19f819726d22 jdk8-b15
cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
7ad075c809952e355d25030605da6af30456ed74 jdk8-b22
60d6f64a86b1e511169d264727f6d51415978df0 jdk8-b23
1a5f1d6b98d6827cdb529a4abe6e52a886d944f4 jdk8-b24
221a378e06a326f45e5d89e2123cd6323e0181d1 jdk8-b25

View File

@ -145,3 +145,5 @@ e1366c5d84ef984095a332bcee70b3938232d07d jdk8-b19
f157fc2a71a38ce44007a6f18d5b011824dce705 jdk8-b21
a11d0062c445d5f36651c78650ab88aa594bcbff jdk8-b22
5218eb256658442b62b05295aafa5b5f35252972 jdk8-b23
b98f0e6dddf987df565029a1f58417fc1844c3f3 jdk8-b24
e45d6b406d5f91ff5256a5c82456ab1e7eb8becd jdk8-b25

View File

@ -215,3 +215,8 @@ fe2c8764998112b7fefcd7d41599714813ae4327 jdk8-b20
dcc292399a39113957eebbd3e487b7e05e2c79fc hs23-b11
e850d8e7ea54b91c7aa656e297f0f9f38dd4c296 jdk8-b23
9e177d44b10fe92ecffa965fef9c5ac5433c1b46 hs23-b12
a80fd4f45d7aaa154ed2f86a129f3c9c4035ec7a jdk8-b24
b22de824749922986ce4d442bed029916b832807 hs23-b13
64b46f975ab82948c1e021e17775ff4fab8bc40e hs23-b14
9ad8feb5afbddec46d3cfe29fb5f73c2e99d5a43 jdk8-b25
d71e662fe03741b6de498ca2077220148405a978 hs23-b15

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,15 +42,6 @@ public class LoaderConstraintTable extends TwoOopHashtable {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("LoaderConstraintTable");
nofBuckets = db.lookupIntConstant("LoaderConstraintTable::_nof_buckets").intValue();
}
// Fields
private static int nofBuckets;
// Accessors
public static int getNumOfBuckets() {
return nofBuckets;
}
public LoaderConstraintTable(Address addr) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,6 @@ public class SystemDictionary {
private static AddressField placeholdersField;
private static AddressField loaderConstraintTableField;
private static sun.jvm.hotspot.types.OopField javaSystemLoaderField;
private static int nofBuckets;
private static sun.jvm.hotspot.types.OopField objectKlassField;
private static sun.jvm.hotspot.types.OopField classLoaderKlassField;
@ -62,7 +61,6 @@ public class SystemDictionary {
placeholdersField = type.getAddressField("_placeholders");
loaderConstraintTableField = type.getAddressField("_loader_constraints");
javaSystemLoaderField = type.getOopField("_java_system_loader");
nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
@ -142,10 +140,6 @@ public class SystemDictionary {
return newOop(javaSystemLoaderField.getValue());
}
public static int getNumOfBuckets() {
return nofBuckets;
}
private static Oop newOop(OopHandle handle) {
return VM.getVM().getObjectHeap().newOop(handle);
}

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -89,19 +89,31 @@ KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
COMMON_VM_PRODUCT_TARGETS=product product1 productkernel docs export_product
COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 jvmgkernel docs export_debug
# JDK directory list
JDK_DIRS=bin include jre lib demo
all: all_product all_fastdebug
ifndef BUILD_CLIENT_ONLY
all_product: product product1 productkernel docs export_product
all_fastdebug: fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
all_debug: jvmg jvmg1 jvmgkernel docs export_debug
else
ifdef BUILD_CLIENT_ONLY
all_product: product1 docs export_product
all_fastdebug: fastdebug1 docs export_fastdebug
all_debug: jvmg1 docs export_debug
else
ifeq ($(MACOSX_UNIVERSAL),true)
all_product: universal_product
all_fastdebug: universal_fastdebug
all_debug: universal_debug
else
all_product: $(COMMON_VM_PRODUCT_TARGETS)
all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
all_debug: $(COMMON_VM_DEBUG_TARGETS)
endif
endif
all_optimized: optimized optimized1 optimizedkernel docs export_optimized
allzero: all_productzero all_fastdebugzero
@ -232,20 +244,19 @@ export_debug:
$(MAKE) VM_SUBDIR=${VM_DEBUG} EXPORT_SUBDIR=/debug generic_export
export_optimized:
$(MAKE) VM_SUBDIR=optimized EXPORT_SUBDIR=/optimized generic_export
export_product_jdk:
export_product_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
VM_SUBDIR=product generic_export
export_optimized_jdk:
export_optimized_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) \
VM_SUBDIR=optimized generic_export
export_fastdebug_jdk:
export_fastdebug_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/fastdebug \
VM_SUBDIR=fastdebug generic_export
export_debug_jdk:
export_debug_jdk::
$(MAKE) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/debug \
VM_SUBDIR=${VM_DEBUG} generic_export
# Export file copy rules
XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
@ -444,14 +455,14 @@ test_jdk:
endif
$(JDK_IMAGE_DIR)/bin/java -server -version
copy_product_jdk:
copy_product_jdk::
$(RM) -r $(JDK_IMAGE_DIR)
$(MKDIR) -p $(JDK_IMAGE_DIR)
($(CD) $(JDK_IMPORT_PATH) && \
$(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
copy_fastdebug_jdk:
copy_fastdebug_jdk::
$(RM) -r $(JDK_IMAGE_DIR)/fastdebug
$(MKDIR) -p $(JDK_IMAGE_DIR)/fastdebug
if [ -d $(JDK_IMPORT_PATH)/fastdebug ] ; then \
@ -464,7 +475,7 @@ copy_fastdebug_jdk:
($(CD) $(JDK_IMAGE_DIR)/fastdebug && $(TAR) -xf -) ; \
fi
copy_debug_jdk:
copy_debug_jdk::
$(RM) -r $(JDK_IMAGE_DIR)/debug
$(MKDIR) -p $(JDK_IMAGE_DIR)/debug
if [ -d $(JDK_IMPORT_PATH)/debug ] ; then \
@ -481,36 +492,6 @@ copy_debug_jdk:
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
fi
# macosx universal builds
ifeq ($(MACOSX_UNIVERSAL), true)
$(UNIVERSAL_LIPO_LIST):
lipo -create -output $@ $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@)
$(UNIVERSAL_COPY_LIST):
$(CP) $(EXPORT_JRE_LIB_DIR)/i386/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) $@
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
endif
universal_product:
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_product
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_product
$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
universal_fastdebug:
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_fastdebug
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_fastdebug
$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
universal_debug:
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 MACOSX_UNIVERSAL=true all_debug
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 MACOSX_UNIVERSAL=true all_debug
$(MKDIR) -p $(EXPORT_JRE_LIB_DIR)/{client,server}
$(QUIETLY) $(MAKE) MACOSX_UNIVERSAL=true universalize
#
# Check target
#
@ -630,6 +611,13 @@ examples_help:
@$(ECHO) \
" $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)"
# Universal build support
ifeq ($(OS_VENDOR), Darwin)
ifeq ($(MACOSX_UNIVERSAL),true)
include $(GAMMADIR)/make/$(OSNAME)/makefiles/universal.gmk
endif
endif
# JPRT rule to build this workspace
include $(GAMMADIR)/make/jprt.gmk
@ -639,6 +627,4 @@ include $(GAMMADIR)/make/jprt.gmk
export_product export_fastdebug export_debug export_optimized \
export_jdk_product export_jdk_fastdebug export_jdk_debug \
create_jdk copy_jdk update_jdk test_jdk \
copy_product_jdk copy_fastdebug_jdk copy_debug_jdk universalize \
universal_product
copy_product_jdk copy_fastdebug_jdk copy_debug_jdk
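
The switch from export_product_jdk: to export_product_jdk:: above (and likewise for the other export_* and copy_* targets) is what lets the new universal.gmk fragment, included later for Darwin, attach additional recipes to the same target names. In GNU make, a double-colon target may be defined several times and every recipe runs; with a single-colon target, a second recipe only triggers an "overriding recipe" warning and replaces the first. A minimal sketch with a hypothetical target name:

    # fragment A
    deploy::
        @echo base step

    # fragment B (e.g. an included makefile) adds a second, independent recipe
    deploy::
        @echo extra universal-build step

    # "make deploy" runs both recipes, in the order they were defined
    # (recipe lines must begin with a tab in a real makefile)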

View File

@ -61,10 +61,10 @@ Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# set flags for adlc compilation
CPPFLAGS = $(SYSDEFS) $(INCLUDES)
CXXFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CPPFLAGS += -DASSERT
CXXFLAGS += -DASSERT
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Compiler warnings are treated as errors
@ -111,7 +111,7 @@ all: $(EXEC)
$(EXEC) : $(OBJECTS)
@echo Making adlc
$(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
# Random dependencies:
$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@ -213,14 +213,14 @@ PROCESS_AD_FILES = awk '{ \
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# Some object files are given a prefix, to disambiguate
# them from objects of the same name built for the VM.
$(OUTDIR)/adlc-%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# #########################################################################
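
The HOST.COMPILE.CXX and HOST.LINK_NOPROF.CXX macros used above exist because adlc is a build-time code generator: it must run on the machine doing the build, so under cross-compilation it is built with the host toolchain while the VM objects use the target toolchain. A minimal sketch of the pattern (paths and the object list are illustrative):

    # target toolchain: produces code for the target architecture
    CXX     = /opt/cross/bin/arm-linux-g++
    # host toolchain: produces tools that run during the build itself
    HOSTCXX = g++

    adlc: $(ADLC_OBJECTS)
        $(HOSTCXX) $(LFLAGS) -o $@ $^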

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -171,10 +171,36 @@ ADD_SA_BINARIES/zero =
EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
# Universal build settings
ifeq ($(OS_VENDOR), Darwin)
# Build universal binaries by default on Mac OS X
MACOSX_UNIVERSAL = true
ifneq ($(ALT_MACOSX_UNIVERSAL),)
MACOSX_UNIVERSAL = $(ALT_MACOSX_UNIVERSAL)
endif
MAKE_ARGS += MACOSX_UNIVERSAL=$(MACOSX_UNIVERSAL)
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
# Universal settings
ifeq ($(MACOSX_UNIVERSAL), true)
# Set universal export path but avoid using ARCH or PLATFORM subdirs
EXPORT_PATH=$(OUTPUTDIR)/export-universal$(EXPORT_SUBDIR)
ifneq ($(ALT_EXPORT_PATH),)
EXPORT_PATH=$(ALT_EXPORT_PATH)
endif
# Set universal image dir
JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-universal$(EXPORT_SUBDIR)
# Binaries to 'universalize' if built
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX)
UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX)
# Files to simply copy in place
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
endif
endif
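
This makes universal binaries the Darwin default while keeping the usual HotSpot ALT_* escape hatch: because the ifneq block copies any non-empty ALT_MACOSX_UNIVERSAL value, a single-architecture build can presumably be forced from the command line, along the lines of:

    # illustrative invocation: override the Darwin universal default
    make ALT_MACOSX_UNIVERSAL=false all_product

MAKE_ARGS then propagates the chosen value to the sub-makes.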

View File

@ -105,11 +105,11 @@ LFLAGS_GENOFFS += -L.
lib$(GENOFFS).dylib: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
$(LIBJVM.o)
$(QUIETLY) $(CCC) $(CPPFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -ljvm
$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).dylib
$(QUIETLY) $(LINK.CC) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
$(QUIETLY) $(LINK.CXX) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
./lib$(GENOFFS).dylib
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
@ -135,7 +135,7 @@ $(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
fi
$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
$(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
$(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
@echo Making $@

View File

@ -25,20 +25,19 @@
OS_VENDOR = $(shell uname -s)
#------------------------------------------------------------------------
# CC, CPP & AS
# CC, CXX & AS
# When cross-compiling the ALT_COMPILER_PATH points
# to the cross-compilation toolset
ifdef CROSS_COMPILE_ARCH
CPP = $(ALT_COMPILER_PATH)/g++
CXX = $(ALT_COMPILER_PATH)/g++
CC = $(ALT_COMPILER_PATH)/gcc
HOSTCPP = g++
HOSTCXX = g++
HOSTCC = gcc
else ifneq ($(OS_VENDOR), Darwin)
CXX = g++
CPP = $(CXX)
CC = gcc
HOSTCPP = $(CPP)
HOSTCXX = $(CXX)
HOSTCC = $(CC)
endif
@ -53,7 +52,6 @@ ifeq ($(OS_VENDOR), Darwin)
ifeq ($(origin CC), default)
CC = llvm-gcc
endif
CPP = $(CXX)
ifeq ($(ARCH), i486)
LLVM_SUPPORTS_STACKREALIGN := $(shell \
@ -67,11 +65,11 @@ ifeq ($(OS_VENDOR), Darwin)
CXX32 ?= g++-4.0
CC32 ?= gcc-4.0
endif
CPP = $(CXX32)
CXX = $(CXX32)
CC = $(CC32)
endif
HOSTCPP = $(CPP)
HOSTCXX = $(CXX)
HOSTCC = $(CC)
endif
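
The CPP-to-CXX rename in this file (repeated across the bsd, linux, solaris, and windows makefiles below) restores the conventional meaning of these variables: in make, CPP names the C preprocessor, not the C++ compiler. GNU make's own built-in defaults show the distinction:

    # GNU make built-in variable defaults (abbreviated)
    #   CC  = cc          # C compiler
    #   CXX = g++         # C++ compiler
    #   CPP = $(CC) -E    # C preprocessor
    %.i: %.c
        $(CPP) $(CPPFLAGS) $< > $@      # preprocess only
    %.o: %.cpp
        $(CXX) $(CXXFLAGS) -c $< -o $@  # compile C++

A makefile that sets CPP = g++, as these files did before the change, shadows the preprocessor variable, which is the confusion this commit removes.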

View File

@ -71,10 +71,10 @@ else
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
endif
LINK_LAUNCHER = $(LINK.c)
LINK_LAUNCHER = $(LINK.CC)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CC/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
LAUNCHER_OUT = launcher
@ -90,11 +90,11 @@ DEPFILES := $(patsubst %.o,%.d,$(OBJS))
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(QUIETLY) echo Linking launcher...

View File

@ -55,4 +55,4 @@ STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
STRIP_AOUT = $(STRIP) -x $@ || exit 1;
# Don't strip in VM build; JDK build will strip libraries later
# LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO))
# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))

View File

@ -27,52 +27,39 @@
# Tell make that .cpp is important
.SUFFIXES: .cpp $(SUFFIXES)
# For now. Other makefiles use CPP as the c++ compiler, but that should really
# name the preprocessor.
ifeq ($(CCC),)
CCC = $(CPP)
endif
DEMANGLER = c++filt
DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
C_COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS)
CC_COMPILE = $(CCC) $(CPPFLAGS) $(CFLAGS)
# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS)
CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS)
AS.S = $(AS) $(ASFLAGS)
COMPILE.c = $(C_COMPILE) -c
GENASM.c = $(C_COMPILE) -S
LINK.c = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.c = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.c = $(C_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.CC = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CXX = $(CXX_COMPILE) -c
GENASM.CXX = $(CXX_COMPILE) -S
LINK.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CXX = $(CXX) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CXX = $(CXX_COMPILE) -E
# cross compiling the jvm with c2 requires host compilers to build
# adlc tool
HOST.CC_COMPILE = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS)
HOST.COMPILE.CC = $(HOST.CC_COMPILE) -c
HOST.LINK_NOPROF.CC = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS)
HOST.CXX_COMPILE = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
HOST.COMPILE.CXX = $(HOST.CXX_COMPILE) -c
HOST.LINK_NOPROF.CXX = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
REMOVE_TARGET = rm -f $@
# Synonyms.
COMPILE.cpp = $(COMPILE.CC)
GENASM.cpp = $(GENASM.CC)
LINK.cpp = $(LINK.CC)
LINK_LIB.cpp = $(LINK_LIB.CC)
PREPROCESS.cpp = $(PREPROCESS.CC)
# Note use of ALT_BOOTDIR to explicitly specify location of java and
# javac; this is the same environment variable used in the J2SE build
# process for overriding the default spec, which is BOOTDIR.
@ -161,14 +148,14 @@ ifdef LP64
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
else
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
$(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
$(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
endif
%.o: %.s
@ -178,13 +165,13 @@ endif
%.s: %.cpp
@echo Generating assembly for $<
$(QUIETLY) $(GENASM.CC) -o $@ $<
$(QUIETLY) $(GENASM.CXX) -o $@ $<
$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
# Intermediate files (for debugging macros)
%.i: %.cpp
@echo Preprocessing $< to $@
$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
# Override gnumake built-in rules which do sccs get operations badly.
# (They put the checked out code in the current directory, not in the

View File

@ -23,13 +23,13 @@
#
#------------------------------------------------------------------------
# CC, CPP & AS
# CC, CXX & AS
CPP = CC
CXX = CC
CC = cc
AS = $(CC) -c
HOSTCPP = $(CPP)
HOSTCXX = $(CXX)
HOSTCC = $(CC)
ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))

View File

@ -0,0 +1,113 @@
#
# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# macosx universal builds
universal_product:
$(MAKE) MACOSX_UNIVERSAL=true all_product_universal
universal_fastdebug:
$(MAKE) MACOSX_UNIVERSAL=true all_fastdebug_universal
universal_debug:
$(MAKE) MACOSX_UNIVERSAL=true all_debug_universal
# Universal builds include 1 or more architectures in a single binary
all_product_universal:
# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
$(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
all_fastdebug_universal:
# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
all_debug_universal:
# $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
# Consolidate architecture builds into a single Universal binary
universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
if [ -n "$${BUILT_LIPO_FILES}" ]; then \
$(MKDIR) -p $(shell dirname $@); \
lipo -create -output $@ $${BUILT_LIPO_FILES}; \
fi
# Copy built non-universal binaries in place
$(UNIVERSAL_COPY_LIST):
BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
if [ -n "$${BUILT_COPY_FILES}" ]; then \
for i in $${BUILT_COPY_FILES}; do \
if [ -f $${i} ]; then \
$(MKDIR) -p $(shell dirname $@); \
$(CP) $${i} $@; \
fi; \
done; \
fi
# Replace arch specific binaries with universal binaries
export_universal:
$(RM) -r $(EXPORT_PATH)/jre/lib/{i386,amd64}
$(RM) -r $(JDK_IMAGE_DIR)/jre/lib/{i386,amd64}
$(RM) $(JDK_IMAGE_DIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
($(CD) $(EXPORT_PATH) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xpf -)
# Overlay universal binaries
copy_universal:
$(RM) -r $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{i386,amd64}
$(RM) $(JDK_IMAGE_DIR)$(COPY_SUBDIR)/jre/lib/{client,server}/libjsig.$(LIBRARY_SUFFIX)
($(CD) $(EXPORT_PATH)$(COPY_SUBDIR) && \
$(TAR) -cf - *) | \
($(CD) $(JDK_IMAGE_DIR)$(COPY_SUBDIR) && $(TAR) -xpf -)
# Additional processing for universal builds
export_product_jdk::
$(MAKE) EXPORT_SUBDIR= export_universal
export_optimized_jdk::
$(MAKE) EXPORT_SUBDIR= export_universal
export_fastdebug_jdk::
$(MAKE) EXPORT_SUBDIR=/fastdebug export_universal
export_debug_jdk::
$(MAKE) EXPORT_SUBDIR=/debug export_universal
copy_product_jdk::
$(MAKE) COPY_SUBDIR= copy_universal
copy_fastdebug_jdk::
$(MAKE) COPY_SUBDIR=/fastdebug copy_universal
copy_debug_jdk::
$(MAKE) COPY_SUBDIR=/debug copy_universal
.PHONY: universal_product universal_fastdebug universal_debug \
all_product_universal all_fastdebug_universal all_debug_universal \
universalize export_universal copy_universal
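
The core of the universalize step is Apple's lipo tool, which fuses the per-architecture builds into one fat binary. Conceptually, for each library on UNIVERSAL_LIPO_LIST the rule above performs something like the following for a two-architecture build (paths illustrative):

    # merge the thin i386 and amd64 builds into one fat libjvm
    lipo -create -output export-universal/jre/lib/server/libjvm.dylib \
        export-product/jre/lib/i386/server/libjvm.dylib \
        export-product/jre/lib/amd64/server/libjvm.dylib
    # verify which architectures the result contains
    lipo -info export-universal/jre/lib/server/libjvm.dylib

Note that the 32-bit pass is commented out in the targets above, so in practice the "universal" output may contain a single architecture; the find/if guards in the lipo and copy rules handle that case.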

View File

@ -82,18 +82,22 @@ BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
CPPFLAGS = \
CXXFLAGS = \
${SYSDEFS} \
${INCLUDES} \
${BUILD_VERSION} \
${BUILD_TARGET} \
${BUILD_USER} \
${HS_LIB_ARCH} \
${JRE_VERSION} \
${VM_DISTRO}
# This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date.
vm_version.o: CXXFLAGS += ${JRE_VERSION}
ifdef DEFAULT_LIBPATH
CPPFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
endif
ifndef JAVASE_EMBEDDED
@ -260,9 +264,9 @@ else
ifeq ($(STATIC_CXX), true)
LFLAGS_VM += $(STATIC_LIBGCC)
LIBS_VM += $(STATIC_STDCXX)
LINK_VM = $(LINK_LIB.c)
else
LINK_VM = $(LINK_LIB.CC)
else
LINK_VM = $(LINK_LIB.CXX)
endif
LIBS_VM += $(LIBS)
@ -280,7 +284,7 @@ endif
$(PRECOMPILED_HEADER):
$(QUIETLY) echo Generating precompiled header $@
$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
# making the library:
@ -305,10 +309,10 @@ endif
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(QUIETLY) { \
echo Linking vm...; \
$(LINK_LIB.CC/PRE_HOOK) \
$(LINK_LIB.CXX/PRE_HOOK) \
$(LINK_VM) $(LD_SCRIPT_FLAG) \
$(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
$(LINK_LIB.CXX/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
}
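
Note the CXXFLAGS hunk above: ${JRE_VERSION} is dropped from the global flags and reattached only to vm_version.o through a GNU make target-specific variable. Every other object now gets a compile line that is stable from build to build, so ccache can hash and reuse it; only the one file that embeds the version string sees the volatile define. The mechanism in isolation (the define name is hypothetical):

    CXXFLAGS = -O2                      # seen by every object
    # target-specific append: only vm_version.o gets the volatile define
    vm_version.o: CXXFLAGS += -DBUILD_STAMP="\"$(shell date)\""

    %.o: %.cpp
        $(CXX) $(CXXFLAGS) -c $< -o $@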

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -185,6 +185,15 @@ ifneq ($(ALT_BOOTDIR),)
BOOTDIR=$(ALT_BOOTDIR)
endif
# Select name of the export directory and honor ALT overrides
EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
ifneq ($(ALT_EXPORT_PATH),)
EXPORT_PATH=$(ALT_EXPORT_PATH)
endif
# Default jdk image if one is created for you with create_jdk
JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
# The platform dependent defs.make defines platform specific variable such
# as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make
@ -263,15 +272,6 @@ MAKE_ARGS += JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
# includes this make/defs.make file.
MAKE_ARGS += HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION)
# Select name of export directory
EXPORT_PATH=$(OUTPUTDIR)/export-$(PLATFORM)$(EXPORT_SUBDIR)
ifneq ($(ALT_EXPORT_PATH),)
EXPORT_PATH=$(ALT_EXPORT_PATH)
endif
# Default jdk image if one is created for you with create_jdk
JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
# Various export sub directories
EXPORT_INCLUDE_DIR = $(EXPORT_PATH)/include
EXPORT_DOCS_DIR = $(EXPORT_PATH)/docs

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=23
HS_MINOR_VER=0
HS_BUILD_NUMBER=12
HS_BUILD_NUMBER=15
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -438,12 +438,12 @@ jprt.my.macosx.x64.test.targets = \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
# ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
# ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
# ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
# ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
# ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \

View File

@ -61,10 +61,10 @@ Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# set flags for adlc compilation
CPPFLAGS = $(SYSDEFS) $(INCLUDES)
CXXFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CPPFLAGS += -DASSERT
CXXFLAGS += -DASSERT
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Compiler warnings are treated as errors
@ -109,7 +109,7 @@ all: $(EXEC)
$(EXEC) : $(OBJECTS)
@echo Making adlc
$(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
# Random dependencies:
$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@ -211,14 +211,14 @@ PROCESS_AD_FILES = awk '{ \
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# Some object files are given a prefix, to disambiguate
# them from objects of the same name built for the VM.
$(OUTDIR)/adlc-%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# #########################################################################

View File

@ -23,19 +23,19 @@
#
#------------------------------------------------------------------------
# CC, CPP & AS
# CC, CXX & AS
# When cross-compiling the ALT_COMPILER_PATH points
# to the cross-compilation toolset
ifdef CROSS_COMPILE_ARCH
CPP = $(ALT_COMPILER_PATH)/g++
CXX = $(ALT_COMPILER_PATH)/g++
CC = $(ALT_COMPILER_PATH)/gcc
HOSTCPP = g++
HOSTCXX = g++
HOSTCC = gcc
else
CPP = g++
CXX = g++
CC = gcc
HOSTCPP = $(CPP)
HOSTCXX = $(CXX)
HOSTCC = $(CC)
endif

View File

@ -54,10 +54,10 @@ else
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
endif
LINK_LAUNCHER = $(LINK.c)
LINK_LAUNCHER = $(LINK.CC)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CC/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
LAUNCHER_OUT = launcher
@ -73,11 +73,11 @@ DEPFILES := $(patsubst %.o,%.d,$(OBJS))
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(QUIETLY) echo Linking launcher...

View File

@ -52,4 +52,4 @@ STRIP_AOUT = $(STRIP) -x $@ || exit 1;
# If we can create .debuginfo files, then the VM is stripped in vm.make
# and this macro is not used.
# LINK_LIB.CC/POST_HOOK += $(STRIP_$(LINK_INTO))
# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))

View File

@ -27,52 +27,39 @@
# Tell make that .cpp is important
.SUFFIXES: .cpp $(SUFFIXES)
# For now. Other makefiles use CPP as the c++ compiler, but that should really
# name the preprocessor.
ifeq ($(CCC),)
CCC = $(CPP)
endif
DEMANGLER = c++filt
DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
C_COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS)
CC_COMPILE = $(CCC) $(CPPFLAGS) $(CFLAGS)
# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS)
CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS)
AS.S = $(AS) $(ASFLAGS)
COMPILE.c = $(C_COMPILE) -c
GENASM.c = $(C_COMPILE) -S
LINK.c = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.c = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.c = $(C_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.CC = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CXX = $(CXX_COMPILE) -c
GENASM.CXX = $(CXX_COMPILE) -S
LINK.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CXX = $(CXX) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CXX = $(CXX_COMPILE) -E
# cross compiling the jvm with c2 requires host compilers to build
# adlc tool
HOST.CC_COMPILE = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS)
HOST.COMPILE.CC = $(HOST.CC_COMPILE) -c
HOST.LINK_NOPROF.CC = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS)
HOST.CXX_COMPILE = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
HOST.COMPILE.CXX = $(HOST.CXX_COMPILE) -c
HOST.LINK_NOPROF.CXX = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
REMOVE_TARGET = rm -f $@
# Synonyms.
COMPILE.cpp = $(COMPILE.CC)
GENASM.cpp = $(GENASM.CC)
LINK.cpp = $(LINK.CC)
LINK_LIB.cpp = $(LINK_LIB.CC)
PREPROCESS.cpp = $(PREPROCESS.CC)
# Note use of ALT_BOOTDIR to explicitly specify location of java and
# javac; this is the same environment variable used in the J2SE build
# process for overriding the default spec, which is BOOTDIR.
@ -161,14 +148,14 @@ ifdef LP64
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
else
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
$(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
$(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
endif
%.o: %.s
@ -178,13 +165,13 @@ endif
%.s: %.cpp
@echo Generating assembly for $<
$(QUIETLY) $(GENASM.CC) -o $@ $<
$(QUIETLY) $(GENASM.CXX) -o $@ $<
$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
# Intermediate files (for debugging macros)
%.i: %.cpp
@echo Preprocessing $< to $@
$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
# Override gnumake built-in rules which do sccs get operations badly.
# (They put the checked out code in the current directory, not in the

View File

@ -23,13 +23,13 @@
#
#------------------------------------------------------------------------
# CC, CPP & AS
# CC, CXX & AS
CPP = CC
CXX = CC
CC = cc
AS = $(CC) -c
HOSTCPP = $(CPP)
HOSTCXX = $(CXX)
HOSTCC = $(CC)
ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))

View File

@ -88,16 +88,20 @@ BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
CPPFLAGS = \
CXXFLAGS = \
${SYSDEFS} \
${INCLUDES} \
${BUILD_VERSION} \
${BUILD_TARGET} \
${BUILD_USER} \
${HS_LIB_ARCH} \
${JRE_VERSION} \
${VM_DISTRO}
# This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date.
vm_version.o: CXXFLAGS += ${JRE_VERSION}
ifndef JAVASE_EMBEDDED
CFLAGS += -DINCLUDE_TRACE
endif
@ -272,13 +276,13 @@ ifeq ($(SHARK_BUILD), true)
LIBS_VM += $(LLVM_LIBS)
endif
LINK_VM = $(LINK_LIB.c)
LINK_VM = $(LINK_LIB.CC)
# rule for building precompiled header
$(PRECOMPILED_HEADER):
$(QUIETLY) echo Generating precompiled header $@
$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
# making the library:
@ -308,10 +312,10 @@ endif
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(QUIETLY) { \
echo Linking vm...; \
$(LINK_LIB.CC/PRE_HOOK) \
$(LINK_LIB.CXX/PRE_HOOK) \
$(LINK_VM) $(LD_SCRIPT_FLAG) \
$(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
$(LINK_LIB.CXX/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then \

View File

@ -62,10 +62,10 @@ Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# set flags for adlc compilation
CPPFLAGS = $(SYSDEFS) $(INCLUDES)
CXXFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CPPFLAGS += -DASSERT
CXXFLAGS += -DASSERT
ifndef USE_GCC
# We need libCstd.so for adlc
@ -130,7 +130,7 @@ all: $(EXEC)
$(EXEC) : $(OBJECTS)
@echo Making adlc
$(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS)
$(QUIETLY) $(LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
# Random dependencies:
$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
@ -228,14 +228,14 @@ PROCESS_AD_FILES = awk '{ \
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# Some object files are given a prefix, to disambiguate
# them from objects of the same name built for the VM.
$(OUTDIR)/adlc-%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
# #########################################################################

View File

@ -150,11 +150,11 @@ endif
lib$(GENOFFS).so: $(DTRACE_SRCDIR)/$(GENOFFS).cpp $(DTRACE_SRCDIR)/$(GENOFFS).h \
$(LIBJVM.o)
$(QUIETLY) $(CCC) $(CPPFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
$(QUIETLY) $(CXX) $(CXXFLAGS) $(GENOFFS_CFLAGS) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_GENOFFS) -o $@ $(DTRACE_SRCDIR)/$(GENOFFS).cpp -lc
$(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).so
$(QUIETLY) $(LINK.CC) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
$(QUIETLY) $(LINK.CXX) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
./lib$(GENOFFS).so
CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \
@ -178,7 +178,7 @@ $(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
$(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
$(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
@echo Making $@

View File

@ -23,9 +23,9 @@
#
#------------------------------------------------------------------------
# CC, CPP & AS
# CC, CXX & AS
CPP = g++
CXX = g++
CC = gcc
AS = $(CC) -c
@ -36,12 +36,12 @@ Compiler = gcc
CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
# Check for the versions of C++ and C compilers ($CPP and $CC) used.
# Check for the versions of C++ and C compilers ($CXX and $CC) used.
# Get the last thing on the line that looks like x.x+ (x is a digit).
COMPILER_REV := \
$(shell $(CPP) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
C_COMPILER_REV := \
$(shell $(CXX) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1)
CC_COMPILER_REV := \
$(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)

View File

@ -52,10 +52,10 @@ else
LIBS_LAUNCHER += -l$(JVM) $(LIBS)
endif
LINK_LAUNCHER = $(LINK.CC)
LINK_LAUNCHER = $(LINK.CXX)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CC/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CC/POST_HOOK)
LINK_LAUNCHER/PRE_HOOK = $(LINK_LIB.CXX/PRE_HOOK)
LINK_LAUNCHER/POST_HOOK = $(LINK_LIB.CXX/POST_HOOK)
ifeq ("${Platform_compiler}", "sparcWorks")
# Enable the following LAUNCHERFLAGS addition if you need to compare the
@ -86,11 +86,11 @@ DEPFILES := $(patsubst %.o,%.d,$(OBJS))
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR_SHARE)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER_OUT)/%.o: $(LAUNCHERDIR)/%.c
$(QUIETLY) [ -d $(LAUNCHER_OUT) ] || { mkdir -p $(LAUNCHER_OUT); }
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CPPFLAGS)
$(QUIETLY) $(CC) -g -o $@ -c $< -MMD $(LAUNCHERFLAGS) $(CXXFLAGS)
$(LAUNCHER): $(OBJS) $(LIBJVM) $(LAUNCHER_MAPFILE)
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)

View File

@ -70,7 +70,7 @@ endif
# If we can create .debuginfo files, then the VM is stripped in vm.make
# and this macro is not used.
# LINK_LIB.CC/POST_HOOK += $(STRIP_LIB.CC/POST_HOOK)
# LINK_LIB.CXX/POST_HOOK += $(STRIP_LIB.CXX/POST_HOOK)
G_SUFFIX =
SYSDEFS += -DPRODUCT

View File

@ -27,44 +27,31 @@
# Tell make that .cpp is important
.SUFFIXES: .cpp $(SUFFIXES)
# For now. Other makefiles use CPP as the c++ compiler, but that should really
# name the preprocessor.
ifeq ($(CCC),)
CCC = $(CPP)
endif
DEMANGLER = c++filt
DEMANGLE = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
# $(CC) is the c compiler (cc/gcc), $(CCC) is the c++ compiler (CC/g++).
C_COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS)
CC_COMPILE = $(CCC) $(CPPFLAGS) $(CFLAGS)
# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
CC_COMPILE = $(CC) $(CXXFLAGS) $(CFLAGS)
CXX_COMPILE = $(CXX) $(CXXFLAGS) $(CFLAGS)
AS.S = $(AS) $(ASFLAGS)
COMPILE.c = $(C_COMPILE) -c
GENASM.c = $(C_COMPILE) -S
LINK.c = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.c = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.c = $(C_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_LIB.CC = $(CC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CC = $(CC_COMPILE) -c
GENASM.CC = $(CC_COMPILE) -S
LINK.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CC = $(CCC) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CC = $(CC_COMPILE) -E
COMPILE.CXX = $(CXX_COMPILE) -c
GENASM.CXX = $(CXX_COMPILE) -S
LINK.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
LINK_NOPROF.CXX = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
LINK_LIB.CXX = $(CXX) $(LFLAGS) $(SHARED_FLAG)
PREPROCESS.CXX = $(CXX_COMPILE) -E
# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
REMOVE_TARGET = rm -f $@
# Synonyms.
COMPILE.cpp = $(COMPILE.CC)
GENASM.cpp = $(GENASM.CC)
LINK.cpp = $(LINK.CC)
LINK_LIB.cpp = $(LINK_LIB.CC)
PREPROCESS.cpp = $(PREPROCESS.CC)
# Note use of ALT_BOOTDIR to explicitly specify location of java and
# javac; this is the same environment variable used in the J2SE build
# process for overriding the default spec, which is BOOTDIR.
@ -153,14 +140,14 @@ ifdef LP64
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
else
%.o: %.cpp
@echo Compiling $<
$(QUIETLY) $(REMOVE_TARGET)
$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
$(subst $(VM_PICFLAG), ,$(COMPILE.CC)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CC) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
$(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
$(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
endif
%.o: %.s
@ -170,13 +157,13 @@ endif
%.s: %.cpp
@echo Generating assembly for $<
$(QUIETLY) $(GENASM.CC) -o $@ $<
$(QUIETLY) $(GENASM.CXX) -o $@ $<
$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
# Intermediate files (for debugging macros)
%.i: %.cpp
@echo Preprocessing $< to $@
$(QUIETLY) $(PREPROCESS.CC) $< > $@ $(COMPILE_DONE)
$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
# Override gnumake built-in rules which do sccs get operations badly.
# (They put the checked out code in the current directory, not in the

View File

@ -93,7 +93,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
exit 1; \
fi
@echo Making SA debugger back-end...
$(QUIETLY) $(CPP) \
$(QUIETLY) $(CXX) \
$(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
-I$(SASRCDIR) \
-I$(GENERATED) \

View File

@ -26,7 +26,7 @@
# tell make which C and C++ compilers to use
CC = cc
CPP = CC
CXX = CC
# Note that this 'as' is an older version of the Sun Studio 'fbe', and will
# use the older style options. The 'fbe' options will match 'cc' and 'CC'.
@ -37,23 +37,23 @@ NAWK = /bin/nawk
REORDER_FLAG = -xF
# Check for the versions of C++ and C compilers ($CPP and $CC) used.
# Check for the versions of C++ and C compilers ($CXX and $CC) used.
# Get the last thing on the line that looks like x.x+ (x is a digit).
COMPILER_REV := \
$(shell $(CPP) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
C_COMPILER_REV := \
$(shell $(CXX) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
CC_COMPILER_REV := \
$(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
# Pick which compiler is validated
ifeq ($(JRE_RELEASE_VER),1.6.0)
# Validated compiler for JDK6 is SS11 (5.8)
VALIDATED_COMPILER_REVS := 5.8
VALIDATED_C_COMPILER_REVS := 5.8
VALIDATED_CC_COMPILER_REVS := 5.8
else
# Validated compiler for JDK7 is SS12 update 1 + patches (5.10)
VALIDATED_COMPILER_REVS := 5.10
VALIDATED_C_COMPILER_REVS := 5.10
VALIDATED_CC_COMPILER_REVS := 5.10
endif
# Warning messages about not using the above validated versions
@ -67,13 +67,13 @@ dummy_var_to_enforce_compiler_rev := $(shell \
warning.)
endif
ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := $(strip ${VALIDATED_C_COMPILER_REVS})
ifeq ($(filter ${ENFORCE_C_COMPILER_REV},${C_COMPILER_REV}),)
PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_C_COMPILER_REV})
ENFORCE_CC_COMPILER_REV${ENFORCE_CC_COMPILER_REV} := $(strip ${VALIDATED_CC_COMPILER_REVS})
ifeq ($(filter ${ENFORCE_CC_COMPILER_REV},${CC_COMPILER_REV}),)
PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_CC_COMPILER_REV})
dummy_var_to_enforce_c_compiler_rev := $(shell \
echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} and \
echo >&2 WARNING: You are using cc version ${CC_COMPILER_REV} and \
should be using version ${PRINTABLE_C_REVS}.; \
echo >&2 Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this \
echo >&2 Set ENFORCE_CC_COMPILER_REV=${CC_COMPILER_REV} to avoid this \
warning.)
endif
@ -98,7 +98,7 @@ JVM_CHECK_SYMBOLS = $(NM) -u -p $(LIBJVM.o) | \
} \
END { exit rc; }'
LINK_LIB.CC/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
LINK_LIB.CXX/PRE_HOOK += $(JVM_CHECK_SYMBOLS) || exit 1;
# New architecture options started in SS12 (5.9), we need both styles to build.
# The older arch options for SS11 (5.8) or older and also for /usr/ccs/bin/as.
@ -518,7 +518,7 @@ endif
#FASTDEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
ifeq (${COMPILER_REV_NUMERIC}, 502)
COMPILER_DATE := $(shell $(CPP) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }')
COMPILER_DATE := $(shell $(CXX) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }')
ifeq (${COMPILER_DATE}, 2001/01/31)
# disable -g0 in fastdebug since SC6.1 dated 2001/01/31 seems to be buggy
# use an innocuous value because it will get -g if it's empty
@ -568,7 +568,7 @@ STRIP = /usr/ccs/bin/strip
# removing repeated lines. The data can be extracted from
# binaries in the field by using "mcs -p libjvm.so" or the older
# command "what libjvm.so".
LINK_LIB.CC/POST_HOOK += $(MCS) -c $@ || exit 1;
LINK_LIB.CXX/POST_HOOK += $(MCS) -c $@ || exit 1;
# (The exit 1 is necessary to cause a build failure if the command fails and
# multiple commands are strung together, and the final semicolon is necessary
# since the hook must terminate itself as a valid command.)
@ -576,7 +576,7 @@ LINK_LIB.CC/POST_HOOK += $(MCS) -c $@ || exit 1;
# Also, strip debug and line number information (worth about 1.7Mb).
# If we can create .debuginfo files, then the VM is stripped in vm.make
# and this macro is not used.
STRIP_LIB.CC/POST_HOOK = $(STRIP) -x $@ || exit 1;
# STRIP_LIB.CC/POST_HOOK is incorporated into LINK_LIB.CC/POST_HOOK
STRIP_LIB.CXX/POST_HOOK = $(STRIP) -x $@ || exit 1;
# STRIP_LIB.CXX/POST_HOOK is incorporated into LINK_LIB.CXX/POST_HOOK
# in certain configurations, such as product.make. Other configurations,
# such as debug.make, do not include the strip operation.
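
The mcs hooks described above can be exercised directly from the shell; a brief sketch of the two operations the comments refer to:

    # what the POST_HOOK adds at link time: compress the .comment section,
    # removing the duplicate strings accumulated from every object file
    mcs -c libjvm.so
    # what is run on a binary in the field: print the build information back
    mcs -p libjvm.so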

View File

@ -76,16 +76,20 @@ BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
CPPFLAGS = \
CXXFLAGS = \
${SYSDEFS} \
${INCLUDES} \
${BUILD_VERSION} \
${BUILD_TARGET} \
${BUILD_USER} \
${HS_LIB_ARCH} \
${JRE_VERSION} \
${VM_DISTRO}
# This is VERY important! The version define must only be supplied to vm_version.o
# If not, ccache will not re-use the cache at all, since the version string might contain
# a time and date.
vm_version.o: CXXFLAGS += ${JRE_VERSION}
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
CFLAGS += $(CFLAGS_WARN)
@ -265,17 +269,17 @@ endif
endif
ifdef USE_GCC
LINK_VM = $(LINK_LIB.c)
else
LINK_VM = $(LINK_LIB.CC)
else
LINK_VM = $(LINK_LIB.CXX)
endif
# making the library:
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
@echo Linking vm...
$(QUIETLY) $(LINK_LIB.CC/PRE_HOOK)
$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
$(QUIETLY) $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM)
$(QUIETLY) $(LINK_LIB.CC/POST_HOOK)
$(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1

View File

@ -57,10 +57,10 @@ DUMPBIN="link.exe /dump"
# When called from the IDE the first param should contain the link version, otherwise it may be nil
if [ "x$1" != "x" ]; then
LINK_VER="$1"
LD_VER="$1"
fi
if [ "x$LINK_VER" != "x800" -a "x$LINK_VER" != "x900" -a "x$LINK_VER" != "x1000" ]; then
if [ "x$LD_VER" != "x800" -a "x$LD_VER" != "x900" -a "x$LD_VER" != "x1000" ]; then
$DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$GREP" -v "type_info" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def
else
# Can't use pipes when calling cl.exe or link.exe from IDE. Using transit file vm3.def

View File

@ -72,8 +72,8 @@ else
echo "MSC_VER_RAW=$MSC_VER_RAW"
fi
if [ "x$FORCE_LINK_VER" != "x" ]; then
echo "LINK_VER=$FORCE_LINK_VER"
if [ "x$FORCE_LD_VER" != "x" ]; then
echo "LD_VER=$FORCE_LD_VER"
else
# use the "link" command that is co-located with the "cl" command
cl_cmd=`which cl`
@ -83,11 +83,11 @@ else
# which can't find "cl" so just use which ever "link" we find
link_cmd="link"
fi
LINK_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
LINK_VER_MAJOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f1`
LINK_VER_MINOR=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f2`
LINK_VER_MICRO=`"$ECHO" $LINK_VER_RAW | "$CUT" -d'.' -f3`
LINK_VER=`"$EXPR" $LINK_VER_MAJOR \* 100 + $LINK_VER_MINOR`
echo "LINK_VER=$LINK_VER"
echo "LINK_VER_RAW=$LINK_VER_RAW"
LD_VER_RAW=`"$link_cmd" 2>&1 | "$HEAD" -n 1 | "$SED" 's/.*Version[\ ]*\([0-9][0-9.]*\).*/\1/'`
LD_VER_MAJOR=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f1`
LD_VER_MINOR=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f2`
LD_VER_MICRO=`"$ECHO" $LD_VER_RAW | "$CUT" -d'.' -f3`
LD_VER=`"$EXPR" $LD_VER_MAJOR \* 100 + $LD_VER_MINOR`
echo "LD_VER=$LD_VER"
echo "LD_VER_RAW=$LD_VER_RAW"
fi

View File

@ -45,9 +45,9 @@ ADLCFLAGS=-q -T -D_LP64
ADLCFLAGS=-q -T -U_LP64
!endif
ADLC_CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
ADLC_CXX_FLAGS=$(CXX_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE
CPP_INCLUDE_DIRS=\
CXX_INCLUDE_DIRS=\
/I "..\generated" \
/I "$(WorkSpace)\src\share\vm" \
/I "$(WorkSpace)\src\os\windows\vm" \
@ -94,14 +94,14 @@ GENERATED_NAMES_IN_DIR=\
$(AdlcOutDir)\dfa_$(Platform_arch_model).cpp
{$(WorkSpace)\src\share\vm\adlc}.cpp.obj::
$(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
$(CXX) $(ADLC_CXX_FLAGS) $(EXH_FLAGS) $(CXX_INCLUDE_DIRS) /c $<
{$(WorkSpace)\src\share\vm\opto}.cpp.obj::
$(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $<
$(CXX) $(ADLC_CXX_FLAGS) $(EXH_FLAGS) $(CXX_INCLUDE_DIRS) /c $<
adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \
forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj
$(LINK) $(LINK_FLAGS) /subsystem:console /out:$@ $**
$(LD) $(LD_FLAGS) /subsystem:console /out:$@ $**
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to
# insert into the linked artifact so we do not need to track it

View File

@ -23,9 +23,9 @@
#
# Generic compiler settings
CPP=cl.exe
CXX=cl.exe
# CPP Flags: (these vary slightly from VC6->VS2003->VS2005 compilers)
# CXX Flags: (these vary slightly from VC6->VS2003->VS2005 compilers)
# /nologo Suppress copyright message at every cl.exe startup
# /W3 Warning level 3
# /Zi Include debugging information
@ -50,47 +50,47 @@ CPP=cl.exe
# improving the quality of crash log stack traces involving jvm.dll.
# These are always used in all compiles
CPP_FLAGS=/nologo /W3 /WX
CXX_FLAGS=/nologo /W3 /WX
# Let's add debug information always too.
CPP_FLAGS=$(CPP_FLAGS) /Zi
CXX_FLAGS=$(CXX_FLAGS) /Zi
# Based on BUILDARCH we add some flags and select the default compiler name
!if "$(BUILDARCH)" == "ia64"
MACHINE=IA64
DEFAULT_COMPILER_NAME=VS2003
CPP_FLAGS=$(CPP_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64"
CXX_FLAGS=$(CXX_FLAGS) /D "CC_INTERP" /D "_LP64" /D "IA64"
!endif
!if "$(BUILDARCH)" == "amd64"
MACHINE=AMD64
DEFAULT_COMPILER_NAME=VS2005
CPP_FLAGS=$(CPP_FLAGS) /D "_LP64" /D "AMD64"
CXX_FLAGS=$(CXX_FLAGS) /D "_LP64" /D "AMD64"
LP64=1
!endif
!if "$(BUILDARCH)" == "i486"
MACHINE=I386
DEFAULT_COMPILER_NAME=VS2003
CPP_FLAGS=$(CPP_FLAGS) /D "IA32"
CXX_FLAGS=$(CXX_FLAGS) /D "IA32"
!endif
# Sanity check, this is the default if not amd64, ia64, or i486
!ifndef DEFAULT_COMPILER_NAME
CPP=ARCH_ERROR
CXX=ARCH_ERROR
!endif
CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS"
CXX_FLAGS=$(CXX_FLAGS) /D "WIN32" /D "_WINDOWS"
# Must specify this for sharedRuntimeTrig.cpp
CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN"
CXX_FLAGS=$(CXX_FLAGS) /D "VM_LITTLE_ENDIAN"
# Used for platform dispatching
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch)
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_FAMILY_windows
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_ARCH_$(Platform_arch)
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model)
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch)
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model)
CXX_FLAGS=$(CXX_FLAGS) /D TARGET_COMPILER_visCPP
# MSC_VER is a 4 digit number that tells us what compiler is being used
@ -150,14 +150,14 @@ MS_RUNTIME_OPTION = /MTd /D "_DEBUG"
# Always add the _STATIC_CPPLIB flag
STATIC_CPPLIB_OPTION = /D _STATIC_CPPLIB /D _DISABLE_DEPRECATE_STATIC_CPPLIB
MS_RUNTIME_OPTION = $(MS_RUNTIME_OPTION) $(STATIC_CPPLIB_OPTION)
CPP_FLAGS=$(CPP_FLAGS) $(MS_RUNTIME_OPTION)
CXX_FLAGS=$(CXX_FLAGS) $(MS_RUNTIME_OPTION)
# How /GX option is spelled
GX_OPTION = /GX
# Optimization settings for various versions of the compilers and types of
# builds. Three basic sets of settings: product, fastdebug, and debug.
# These get added into CPP_FLAGS as needed by other makefiles.
# These get added into CXX_FLAGS as needed by other makefiles.
!if "$(COMPILER_NAME)" == "VC6"
PRODUCT_OPT_OPTION = /Ox /Os /Gy /GF
FASTDEBUG_OPT_OPTION = /Ox /Os /Gy /GF
@ -180,7 +180,7 @@ GX_OPTION = /EHsc
# externals at link time. Even with /GS-, you need bufferoverflowU.lib.
# NOTE: Currently we decided to not use /GS-
BUFFEROVERFLOWLIB = bufferoverflowU.lib
LINK_FLAGS = /manifest $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
LD_FLAGS = /manifest $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
@ -191,7 +191,7 @@ PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LINK_FLAGS = /manifest $(LINK_FLAGS)
LD_FLAGS = /manifest $(LD_FLAGS)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
@ -202,12 +202,12 @@ PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LINK_FLAGS = /manifest $(LINK_FLAGS)
LD_FLAGS = /manifest $(LD_FLAGS)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
!if "$(BUILDARCH)" == "i486"
LINK_FLAGS = /SAFESEH $(LINK_FLAGS)
LD_FLAGS = /SAFESEH $(LD_FLAGS)
!endif
!endif
@ -225,15 +225,15 @@ FASTDEBUG_OPT_OPTION = $(DEBUG_OPT_OPTION)
!endif
# Generic linker settings
LINK=link.exe
LINK_FLAGS= $(LINK_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
LD=link.exe
LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
/opt:ICF,8 /map /debug
!if $(MSC_VER) >= 1600
LINK_FLAGS= $(LINK_FLAGS) psapi.lib
LD_FLAGS= $(LD_FLAGS) psapi.lib
!endif
# Resource compiler settings
@ -250,7 +250,7 @@ RC_FLAGS=/D "HS_VER=$(HS_VER)" \
/D "HS_INTERNAL_NAME=$(HS_INTERNAL_NAME)" \
/D "HS_NAME=$(HS_NAME)"
# Need this to match the CPP_FLAGS settings
# Need this to match the CXX_FLAGS settings
!if "$(MFC_DEBUG)" == "true"
RC_FLAGS = $(RC_FLAGS) /D "_DEBUG"
!endif


@ -38,7 +38,7 @@ default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
!include ../local.make
!include compile.make
CPP_FLAGS=$(CPP_FLAGS) $(DEBUG_OPT_OPTION)
CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION)
!include $(WorkSpace)/make/windows/makefiles/vm.make
!include local.make
@ -52,8 +52,8 @@ vm.def: $(Obj_Files)
sh $(WorkSpace)/make/windows/build_vm_def.sh
$(AOUT): $(Res_Files) $(Obj_Files) vm.def
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
$(LD) @<<
$(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to


@ -38,7 +38,7 @@ default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
!include ../local.make
!include compile.make
CPP_FLAGS=$(CPP_FLAGS) $(FASTDEBUG_OPT_OPTION)
CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
!include $(WorkSpace)/make/windows/makefiles/vm.make
!include local.make
@ -52,8 +52,8 @@ vm.def: $(Obj_Files)
sh $(WorkSpace)/make/windows/build_vm_def.sh
$(AOUT): $(Res_Files) $(Obj_Files) vm.def
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
$(LD) @<<
$(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to


@ -23,7 +23,7 @@
#
LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \
LAUNCHER_FLAGS=$(CXX_FLAGS) $(ARCHFLAG) \
/D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \
/D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \
/D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \
@ -39,18 +39,18 @@ LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \
/I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \
/I $(WorkSpace)\src\os\windows\vm
LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console
LD_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console
!if "$(COMPILER_NAME)" == "VS2005"
# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib
# on the link command line, otherwise we get missing __security_check_cookie
# externals at link time. Even with /GS-, you need bufferoverflowU.lib.
BUFFEROVERFLOWLIB = bufferoverflowU.lib
LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
LD_FLAGS = $(LD_FLAGS) $(BUFFEROVERFLOWLIB)
!endif
!if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486"
LINK_FLAGS = /SAFESEH $(LINK_FLAGS)
LD_FLAGS = /SAFESEH $(LD_FLAGS)
!endif
LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher
@ -60,14 +60,14 @@ OUTDIR = launcher
{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj:
-mkdir $(OUTDIR) 2>NUL >NUL
$(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
$(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj:
-mkdir $(OUTDIR) 2>NUL >NUL
$(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $<
$(CXX) $(LAUNCHER_FLAGS) /c /Fo$@ $<
$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h
launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj
echo $(JAVA_HOME) > jdkpath.txt
$(LINK) $(LINK_FLAGS) /out:hotspot.exe $**
$(LD) $(LD_FLAGS) /out:hotspot.exe $**


@ -37,7 +37,7 @@ default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA
!include ../local.make
!include compile.make
CPP_FLAGS=$(CPP_FLAGS) $(PRODUCT_OPT_OPTION)
CXX_FLAGS=$(CXX_FLAGS) $(PRODUCT_OPT_OPTION)
RELEASE=
@ -54,16 +54,16 @@ $(Res_Files): FORCE
# Kernel doesn't need exported vtbl symbols.
!if "$(Variant)" == "kernel"
$(AOUT): $(Res_Files) $(Obj_Files)
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files)
$(LD) @<<
$(LD_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files)
<<
!else
vm.def: $(Obj_Files)
sh $(WorkSpace)/make/windows/build_vm_def.sh
$(AOUT): $(Res_Files) $(Obj_Files) vm.def
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
$(LD) @<<
$(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!endif
!if "$(MT)" != ""


@ -89,7 +89,7 @@ ProjectCreatorIDEOptions=\
-jdkTargetRoot $(HOTSPOTJDKDIST) \
-define ALIGN_STACK_FRAMES \
-define VM_LITTLE_ENDIAN \
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
-postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
-ignoreFile jsig.c \
-ignoreFile jvmtiEnvRecommended.cpp \


@ -91,16 +91,16 @@ SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 $(GX_OPTION) /Od /D "WIN32" /D "WIN
!if "$(COMPILER_NAME)" == "VS2005"
# On amd64, VS2005 compiler requires bufferoverflowU.lib on the link command line,
# otherwise we get missing __security_check_cookie externals at link time.
SA_LINK_FLAGS = bufferoverflowU.lib
SA_LD_FLAGS = bufferoverflowU.lib
!endif
!else
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
!endif
!if "$(MT)" != ""
SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS)
SA_LD_FLAGS = /manifest $(SA_LD_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
SA_LFLAGS = $(SA_LD_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
# Note that we do not keep sawindbg.obj around as it would then
# get included in the dumpbin command in build_vm_def.sh
@ -110,14 +110,14 @@ SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(M
# Use ";#2" for .dll and ";#1" for .exe in the MT command below:
$(SAWINDBG): $(SASRCFILE)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CPP) @<<
$(CXX) @<<
/I"$(BootStrapDir)/include" /I"$(BootStrapDir)/include/win32"
/I"$(GENERATED)" $(SA_CFLAGS)
$(SASRCFILE)
/out:sawindbg.obj
<<
set LIB=$(SA_LIB)$(LIB)
$(LINK) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS)
$(LD) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS)
!if "$(MT)" != ""
$(MT) /manifest $(@F).manifest /outputresource:$(@F);#2
!endif


@ -31,5 +31,5 @@ checkCL:
echo *** WARNING *** unrecognized cl.exe version $(MSC_VER) ($(RAW_MSC_VER)). Use FORCE_MSC_VER to override automatic detection.
checkLink:
@ if "$(LINK_VER)" NEQ "710" if "$(LINK_VER)" NEQ "800" if "$(LINK_VER)" NEQ "900" if "$(LINK_VER)" NEQ "1000" \
echo *** WARNING *** unrecognized link.exe version $(LINK_VER) ($(RAW_LINK_VER)). Use FORCE_LINK_VER to override automatic detection.
@ if "$(LD_VER)" NEQ "710" if "$(LD_VER)" NEQ "800" if "$(LD_VER)" NEQ "900" if "$(LD_VER)" NEQ "1000" \
echo *** WARNING *** unrecognized link.exe version $(LD_VER) ($(RAW_LD_VER)). Use FORCE_LD_VER to override automatic detection.


@ -30,8 +30,8 @@ default:: $(SUBDIRS)
DIR=.
!endif
!ifndef CPP
CPP=cl.exe
!ifndef CXX
CXX=cl.exe
!endif


@ -32,12 +32,12 @@ ALTSRC=$(WorkSpace)\src\closed
!ifdef RELEASE
!ifdef DEVELOP
CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG"
CXX_FLAGS=$(CXX_FLAGS) /D "DEBUG"
!else
CPP_FLAGS=$(CPP_FLAGS) /D "PRODUCT"
CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
!endif
!else
CPP_FLAGS=$(CPP_FLAGS) /D "ASSERT"
CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
!endif
!if "$(Variant)" == "core"
@ -45,19 +45,19 @@ CPP_FLAGS=$(CPP_FLAGS) /D "ASSERT"
!endif
!if "$(Variant)" == "kernel"
CPP_FLAGS=$(CPP_FLAGS) /D "KERNEL"
CXX_FLAGS=$(CXX_FLAGS) /D "KERNEL"
!endif
!if "$(Variant)" == "compiler1"
CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1"
CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
!endif
!if "$(Variant)" == "compiler2"
CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER2"
CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER2"
!endif
!if "$(Variant)" == "tiered"
CPP_FLAGS=$(CPP_FLAGS) /D "COMPILER1" /D "COMPILER2"
CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1" /D "COMPILER2"
!endif
!if "$(BUILDARCH)" == "i486"
@ -67,21 +67,21 @@ HOTSPOT_LIB_ARCH=$(BUILDARCH)
!endif
# The following variables are defined in the generated local.make file.
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\""
CPP_FLAGS=$(CPP_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_RELEASE_VERSION=\"$(HS_BUILD_VER)\""
CXX_FLAGS=$(CXX_FLAGS) /D "JRE_RELEASE_VERSION=\"$(JRE_RELEASE_VER)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_LIB_ARCH=\"$(HOTSPOT_LIB_ARCH)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
!ifndef JAVASE_EMBEDDED
CPP_FLAGS=$(CPP_FLAGS) /D "INCLUDE_TRACE"
CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE"
!endif
CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)
CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS)
# Define that so jni.h is on correct side
CPP_FLAGS=$(CPP_FLAGS) /D "_JNI_IMPLEMENTATION_"
CXX_FLAGS=$(CXX_FLAGS) /D "_JNI_IMPLEMENTATION_"
!if "$(BUILDARCH)" == "ia64"
STACK_SIZE="/STACK:1048576,262144"
@ -102,7 +102,7 @@ AGCT_EXPORT=/export:AsyncGetCallTrace
# If you modify exports below please do the corresponding changes in
# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
/export:JNI_GetDefaultJavaVMInitArgs \
/export:JNI_CreateJavaVM \
/export:JVM_FindClassFromBootLoader \
@ -118,25 +118,25 @@ LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
/export:JVM_GetThreadStateValues \
/export:JVM_InitAgentProperties
CPP_INCLUDE_DIRS=/I "..\generated"
CXX_INCLUDE_DIRS=/I "..\generated"
!if exists($(ALTSRC)\share\vm)
CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
!endif
!if exists($(ALTSRC)\os\windows\vm)
CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\os\windows\vm"
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os\windows\vm"
!endif
!if exists($(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm)
CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm"
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm"
!endif
!if exists($(ALTSRC)\cpu\$(Platform_arch)\vm)
CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
!endif
CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) \
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) \
/I "$(COMMONSRC)\share\vm" \
/I "$(COMMONSRC)\share\vm\precompiled" \
/I "$(COMMONSRC)\share\vm\prims" \
@ -144,12 +144,12 @@ CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) \
/I "$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm" \
/I "$(COMMONSRC)\cpu\$(Platform_arch)\vm"
CPP_DONT_USE_PCH=/D DONT_USE_PRECOMPILED_HEADER
CXX_DONT_USE_PCH=/D DONT_USE_PRECOMPILED_HEADER
!if "$(USE_PRECOMPILED_HEADER)" != "0"
CPP_USE_PCH=/Fp"vm.pch" /Yu"precompiled.hpp"
CXX_USE_PCH=/Fp"vm.pch" /Yu"precompiled.hpp"
!else
CPP_USE_PCH=$(CPP_DONT_USE_PCH)
CXX_USE_PCH=$(CXX_DONT_USE_PCH)
!endif
# Where to find the source code for the virtual machine (is this used?)
@ -194,101 +194,101 @@ VM_PATH={$(VM_PATH)}
# Special case files not using precompiled header files.
c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_windows.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\os_windows.cpp
os_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\os_windows_$(Platform_arch).cpp
osThread_windows.obj: $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\osThread_windows.cpp
conditionVar_windows.obj: $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os\windows\vm\conditionVar_windows.cpp
getThread_windows_$(Platform_arch).obj: $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\os_cpu\windows_$(Platform_arch)\vm\getThread_windows_$(Platform_arch).cpp
opcodes.obj: $(WorkSpace)\src\share\vm\opto\opcodes.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\opto\opcodes.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\opto\opcodes.cpp
bytecodeInterpreter.obj: $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\share\vm\interpreter\bytecodeInterpreter.cpp
bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
$(CPP) $(CPP_FLAGS) $(CPP_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
$(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c ..\generated\jvmtifiles\bytecodeInterpreterWithChecks.cpp
# Default rules for the Virtual Machine
{$(COMMONSRC)\share\vm\c1}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\compiler}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\code}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\interpreter}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\ci}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\classfile}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_implementation\parallelScavenge}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_implementation\shared}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_implementation\parNew}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_implementation\g1}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\gc_interface}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\asm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\memory}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\oops}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\prims}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\runtime}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\services}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\trace}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\utilities}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\libadt}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\share\vm\opto}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\os\windows\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
# This guy should remain a single colon rule because
# otherwise we can't specify the output filename.
@ -296,113 +296,113 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
@$(RC) $(RC_FLAGS) /fo"$@" $<
{$(COMMONSRC)\cpu\$(Platform_arch)\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\c1}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\compiler}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\code}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\interpreter}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\ci}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\classfile}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_implementation\parallelScavenge}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_implementation\shared}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_implementation\parNew}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_implementation\g1}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\gc_interface}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\asm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\memory}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\oops}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\prims}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\runtime}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\services}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\trace}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\utilities}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\libadt}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\opto}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\os\windows\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
# otherwise we can't specify the output filename.
{$(ALTSRC)\os\windows\vm}.rc.res:
@$(RC) $(RC_FLAGS) /fo"$@" $<
{$(ALTSRC)\cpu\$(Platform_arch)\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{..\generated\incls}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{..\generated\adfiles}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{..\generated\jvmtifiles}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
default::
_build_pch_file.obj:
@echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
$(CPP) $(CPP_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
$(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2134,6 +2134,7 @@ public:
// address pseudos: make these names unlike instruction names to avoid confusion
inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
@ -2249,7 +2250,7 @@ public:
// this platform we assume byte size
inline void stbool(Register d, const Address& a) { stb(d, a); }
inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
inline void ldbool(const Address& a, Register d) { ldub(a, d); }
inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
// klass oop manipulations if compressed


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -692,6 +692,17 @@ inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Registe
}
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {
patchable_sethi(addrlit, d);
} else {
sethi(addrlit, d);
}
ldub(d, addrlit.low10() + offset, d);
}
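// Editorial sketch (not in the original change): a VM bool flag occupies a
// single byte, so the unsigned byte load (ldub) used here matches the
// ldbool change above; an ldsb would sign-extend any byte with the high
// bit set into a negative value before callers compare it against zero.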
inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
assert_not_delayed();
if (ForceUnreachable) {


@ -472,7 +472,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(src_reg, tmp_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ ld(ref_type_adr, tmp_reg);
__ ldub(ref_type_adr, tmp_reg);
// _reference_type field is of type ReferenceType (enum)
assert(REF_NONE == 0, "check this code");


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 140000);
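
The switch from a literal true to trueInTiered ties the default to the build
flavor. A sketch of the presumed definition, following the
runtime/globals.hpp convention (assumption, verify against the header):

#ifdef TIERED
#define trueInTiered true    // tiered (C1+C2) build: TieredCompilation defaults on
#else
#define trueInTiered false   // client- or server-only build: defaults off
#endif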


@ -28,6 +28,7 @@
#include "oops/markOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -321,6 +321,16 @@ static int reg2offset(VMReg r) {
return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
static VMRegPair reg64_to_VMRegPair(Register r) {
VMRegPair ret;
if (wordSize == 8) {
ret.set2(r->as_VMReg());
} else {
ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
}
return ret;
}
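// Editorial note: on LP64 (wordSize == 8) the single register holds the
// whole 64-bit value, hence set2(); on 32-bit SPARC the value spans the
// register and its successor, so set_pair() records both halves of the
// pair.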
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
@ -1444,6 +1454,25 @@ static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
__ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
__ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
} else {
// stack to reg
__ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
}
} else if (dst.first()->is_stack()) {
// reg to stack
__ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
} else {
__ mov(src.first()->as_Register(), dst.first()->as_Register());
}
}
// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
OopMap* map,
@ -1748,6 +1777,166 @@ static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
}
}
static void save_or_restore_arguments(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMap* map,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
// if map is non-NULL then the code should store the values,
// otherwise it should load them.
if (map != NULL) {
// Fill in the map
for (int i = 0; i < total_in_args; i++) {
if (in_sig_bt[i] == T_ARRAY) {
if (in_regs[i].first()->is_stack()) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
} else if (in_regs[i].first()->is_Register()) {
map->set_oop(in_regs[i].first());
} else {
ShouldNotReachHere();
}
}
}
}
// Save or restore double word values
int handle_index = 0;
for (int i = 0; i < total_in_args; i++) {
int slot = handle_index + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
if (reg->is_global()) {
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ stx(reg, SP, offset + STACK_BIAS);
} else {
__ ldx(SP, offset + STACK_BIAS, reg);
}
}
} else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
} else {
__ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
}
}
}
// Save floats
for (int i = 0; i < total_in_args; i++) {
int slot = handle_index + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
handle_index++;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
} else {
__ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
}
}
}
}
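// Worked example of the slot arithmetic above (illustrative values,
// assuming VMRegImpl::stack_slot_size == 4): a double reached with
// handle_index == 2 and arg_save_area == 8 occupies slot 10, i.e. it is
// spilled to SP + 40 (+ STACK_BIAS on 64-bit).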
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
Label cont;
AddressLiteral sync_state(GC_locker::needs_gc_address());
__ load_bool_contents(sync_state, G3_scratch);
__ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
__ delayed()->nop();
// Save down any values that are live in registers and call into the
// runtime to halt for a GC
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
__ mov(G2_thread, L7_thread_cache);
__ set_last_Java_frame(SP, noreg);
__ block_comment("block_for_jni_critical");
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
__ delayed()->mov(L7_thread_cache, O0);
oop_maps->add_gc_map( __ offset(), map);
__ restore_thread(L7_thread_cache); // restore G2_thread
__ reset_last_Java_frame();
// Reload all the register arguments
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
// Stress register saving
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
// Destroy argument registers
for (int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
if (reg->is_global()) {
__ mov(G0, reg);
}
} else if (in_regs[i].first()->is_FloatRegister()) {
__ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
}
}
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
}
#endif
}
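// Conceptual flow of the stub emitted above (editorial sketch):
//   if (*GC_locker::needs_gc_address()) {
//     spill argument registers;                        // recorded in the OopMap
//     SharedRuntime::block_for_jni_critical(thread);   // wait for the forced GC
//     reload argument registers;
//   }
//   // fall through and enter the JNI critical region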
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
// Pass the length, ptr pair
Label is_null, done;
if (reg.first()->is_stack()) {
VMRegPair tmp = reg64_to_VMRegPair(L2);
// Load the arg up from the stack
move_ptr(masm, reg, tmp);
reg = tmp;
}
__ cmp(reg.first()->as_Register(), G0);
__ brx(Assembler::equal, false, Assembler::pt, is_null);
__ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
__ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
__ ba_short(done);
__ bind(is_null);
// Pass zeros
move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
__ bind(done);
}
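// C-level equivalent of the unpacking above for, say, a jint[] argument
// (editorial sketch, names hypothetical):
//   if (arr == NULL) { body = NULL; length = 0; }
//   else {
//     body   = (char*)arr + arrayOopDesc::base_offset_in_bytes(T_INT);
//     length = *(jint*)((char*)arr + arrayOopDesc::length_offset_in_bytes());
//   }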
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
@ -1762,6 +1951,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
native_func = method->native_function();
is_critical_native = false;
}
assert(native_func != NULL, "must have function");
// Native nmethod wrappers never take possession of the oop arguments.
// So the caller will gc the arguments. The only thing we need an
@ -1841,22 +2037,70 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
int total_c_args = total_in_args + 1;
if (method->is_static()) {
total_c_args++;
int total_c_args = total_in_args;
int total_save_slots = 6 * VMRegImpl::slots_per_word;
if (!is_critical_native) {
total_c_args += 1;
if (method->is_static()) {
total_c_args++;
}
} else {
for (int i = 0; i < total_in_args; i++) {
if (in_sig_bt[i] == T_ARRAY) {
// These have to be saved and restored across the safepoint
total_c_args++;
}
}
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
if (!is_critical_native) {
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
}
} else {
Thread* THREAD = Thread::current();
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
SignatureStream ss(method->signature());
for (int i = 0; i < total_in_args ; i++ ) {
if (in_sig_bt[i] == T_ARRAY) {
// Arrays are passed as int, elem* pair
out_sig_bt[argc++] = T_INT;
out_sig_bt[argc++] = T_ADDRESS;
Symbol* atype = ss.as_symbol(CHECK_NULL);
const char* at = atype->as_C_string();
if (strlen(at) == 2) {
assert(at[0] == '[', "must be");
switch (at[1]) {
case 'B': in_elem_bt[i] = T_BYTE; break;
case 'C': in_elem_bt[i] = T_CHAR; break;
case 'D': in_elem_bt[i] = T_DOUBLE; break;
case 'F': in_elem_bt[i] = T_FLOAT; break;
case 'I': in_elem_bt[i] = T_INT; break;
case 'J': in_elem_bt[i] = T_LONG; break;
case 'S': in_elem_bt[i] = T_SHORT; break;
case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
default: ShouldNotReachHere();
}
}
} else {
out_sig_bt[argc++] = in_sig_bt[i];
in_elem_bt[i] = T_VOID;
}
if (in_sig_bt[i] != T_VOID) {
assert(in_sig_bt[i] == ss.type(), "must match");
ss.next();
}
}
}
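// Illustrative rewrite (editorial sketch; the method and the lookup prefix
// are assumptions, not taken from this change): for
//   static native void fill(byte[] buf);
// the ordinary JNI entry receives (JNIEnv*, jclass, jbyteArray), while the
// critical entry resolved via method->critical_native_function() receives
// the (T_INT, T_ADDRESS) pair built above, conventionally
//   void JavaCritical_pkg_Cls_fill(jint length, jbyte* body);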
// Now figure out where the args must be stored and how much stack space
@ -1866,6 +2110,35 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
if (is_critical_native) {
// Critical natives may have to call out so they need a save area
// for register arguments.
int double_slots = 0;
int single_slots = 0;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT: assert(reg->is_in(), "don't need to save these"); break;
case T_LONG: if (reg->is_global()) double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_FloatRegister()) {
switch (in_sig_bt[i]) {
case T_FLOAT: single_slots++; break;
case T_DOUBLE: double_slots++; break;
default: ShouldNotReachHere();
}
}
}
total_save_slots = double_slots * 2 + single_slots;
}
// Compute framesize for the wrapper. We need to handlize all oops in
// registers. We must create space for them here that is disjoint from
// the windowed save area because we have no control over when we might
@ -1885,12 +2158,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now the space for the inbound oop handle area
int oop_handle_offset = stack_slots;
stack_slots += 6*VMRegImpl::slots_per_word;
int oop_handle_offset = round_to(stack_slots, 2);
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
int oop_temp_slot_offset = 0;
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
@ -1954,6 +2226,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ verify_thread();
if (is_critical_native) {
check_needs_gc_for_critical_native(masm, stack_slots, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
//
// We immediately shuffle the arguments so that any vm call we have to
@ -1982,7 +2258,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// caller.
//
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
int c_arg = total_c_args - 1;
// Record sp-based slot for receiver on stack for non-static methods
int receiver_offset = -1;
@ -2002,7 +2277,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
#endif /* ASSERT */
for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
#ifdef ASSERT
if (in_regs[i].first()->is_Register()) {
@ -2019,7 +2294,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
c_arg--;
break;
}
case T_OBJECT:
assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@ -2029,7 +2310,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
case T_FLOAT:
float_move(masm, in_regs[i], out_regs[c_arg]);
break;
break;
case T_DOUBLE:
assert( i + 1 < total_in_args &&
@ -2051,7 +2332,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Pre-load a static method's oop into O1. Used both by locking code and
// the normal JNI call code.
if (method->is_static()) {
if (method->is_static() && !is_critical_native) {
__ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
// Now handlize the static class mirror in O1. It's known not-null.
@ -2064,13 +2345,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register L6_handle = L6;
if (method->is_synchronized()) {
assert(!is_critical_native, "unhandled");
__ mov(O1, L6_handle);
}
// We have all of the arguments setup at this point. We MUST NOT touch any Oregs
// except O6/O7. So if we must call out we must push a new frame. We immediately
// push a new frame and flush the windows.
#ifdef _LP64
intptr_t thepc = (intptr_t) __ pc();
{
@ -2202,32 +2483,28 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
}
// get JNIEnv* which is first argument to native
__ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
if (!is_critical_native) {
__ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
}
// Use that pc we placed in O7 a while back as the current frame anchor
__ set_last_Java_frame(SP, O7);
// We flushed the windows ages ago now mark them as flushed before transitioning.
__ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
// Transition from _thread_in_Java to _thread_in_native.
__ set(_thread_in_native, G3_scratch);
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
// We flushed the windows ages ago now mark them as flushed
// mark windows as flushed
__ set(JavaFrameAnchor::flushed, G3_scratch);
Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
#ifdef _LP64
AddressLiteral dest(method->native_function());
AddressLiteral dest(native_func);
__ relocate(relocInfo::runtime_call_type);
__ jumpl_to(dest, O7, O7);
#else
__ call(method->native_function(), relocInfo::runtime_call_type);
__ call(native_func, relocInfo::runtime_call_type);
#endif
__ delayed()->st(G3_scratch, flags);
__ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ restore_thread(L7_thread_cache); // restore G2_thread
@ -2259,6 +2536,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
ShouldNotReachHere();
}
Label after_transition;
// must we block?
// Block, if necessary, before resuming in _thread_in_Java state.
@ -2303,22 +2581,34 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// a distinct one for this pc
//
save_native_result(masm, ret_type, stack_slots);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
G2_thread);
if (!is_critical_native) {
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
G2_thread);
} else {
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
G2_thread);
}
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
if (is_critical_native) {
// The call above performed the transition to thread_in_Java so
// skip the transition logic below.
__ ba(after_transition);
__ delayed()->nop();
}
__ bind(no_block);
}
// thread state is thread_in_native_trans. Any safepoint blocking has already
// happened so we can now change state to _thread_in_Java.
__ set(_thread_in_Java, G3_scratch);
__ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
__ bind(after_transition);
Label no_reguard;
__ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
@ -2416,12 +2706,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ verify_oop(I0);
}
// reset handle block
__ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
__ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
if (!is_critical_native) {
// reset handle block
__ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
__ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
__ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
check_forward_pending_exception(masm, G3_scratch);
__ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
check_forward_pending_exception(masm, G3_scratch);
}
// Return
@ -2450,6 +2742,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_offset),
oop_maps);
if (is_critical_native) {
nm->set_lazy_critical_native(true);
}
return nm;
}
@ -2473,17 +2769,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
static bool offsets_initialized = false;
static VMRegPair reg64_to_VMRegPair(Register r) {
VMRegPair ret;
if (wordSize == 8) {
ret.set2(r->as_VMReg());
} else {
ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
}
return ret;
}
nmethod *SharedRuntime::generate_dtrace_nmethod(
MacroAssembler *masm, methodHandle method) {


@ -520,7 +520,7 @@ void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
__ load_klass(tmp_reg, src_reg);
Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
__ cmpl(ref_type_adr, REF_NONE);
__ cmpb(ref_type_adr, REF_NONE);
__ jcc(Assembler::equal, _continuation);
// Is marking active?


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 100000);


@ -28,6 +28,7 @@
#include "oops/markOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"


@ -2364,23 +2364,19 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// grab another temp
Register rsi_temp = rsi;
{ if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
// (preceding push must be done after argslot address is taken!)
#define UNPUSH_RSI \
{ if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
// arx_argslot points both to the array and to the first output arg
vmarg = Address(rax_argslot, 0);
// Get the array value.
Register rsi_array = rsi_temp;
Register rdi_array = rdi_temp;
Register rdx_array_klass = rdx_temp;
BasicType elem_type = ek_adapter_opt_spread_type(ek);
int elem_slots = type2size[elem_type]; // 1 or 2
int array_slots = 1; // array is always a T_OBJECT
int length_offset = arrayOopDesc::length_offset_in_bytes();
int elem0_offset = arrayOopDesc::base_offset_in_bytes(elem_type);
__ movptr(rsi_array, vmarg);
__ movptr(rdi_array, vmarg);
Label L_array_is_empty, L_insert_arg_space, L_copy_args, L_args_done;
if (length_can_be_zero) {
@ -2391,12 +2387,30 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ testl(rbx_temp, rbx_temp);
__ jcc(Assembler::notZero, L_skip);
}
__ testptr(rsi_array, rsi_array);
__ jcc(Assembler::zero, L_array_is_empty);
__ testptr(rdi_array, rdi_array);
__ jcc(Assembler::notZero, L_skip);
// If 'rsi' contains the 'saved_last_sp' (this is only the
// case in a 32-bit version of the VM) we have to save 'rsi'
// on the stack because later on (at 'L_array_is_empty') 'rsi'
// will be overwritten.
{ if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
// Also prepare a handy macro which restores 'rsi' if required.
#define UNPUSH_RSI \
{ if (rsi_temp == saved_last_sp) __ pop(saved_last_sp); }
__ jmp(L_array_is_empty);
__ bind(L_skip);
}
__ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx_array_klass, rsi_array);
__ null_check(rdi_array, oopDesc::klass_offset_in_bytes());
__ load_klass(rdx_array_klass, rdi_array);
// Save 'rsi' if required (see comment above). Do this only
// after the null check such that the exception handler which is
// called in the case of a null pointer exception will not be
// confused by the extra value on the stack (it expects the
// return pointer on top of the stack)
{ if (rsi_temp == saved_last_sp) __ push(saved_last_sp); }
// Check the array type.
Register rbx_klass = rbx_temp;
@ -2404,18 +2418,18 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
load_klass_from_Class(_masm, rbx_klass);
Label ok_array_klass, bad_array_klass, bad_array_length;
__ check_klass_subtype(rdx_array_klass, rbx_klass, rdi_temp, ok_array_klass);
__ check_klass_subtype(rdx_array_klass, rbx_klass, rsi_temp, ok_array_klass);
// If we get here, the type check failed!
__ jmp(bad_array_klass);
__ BIND(ok_array_klass);
// Check length.
if (length_constant >= 0) {
__ cmpl(Address(rsi_array, length_offset), length_constant);
__ cmpl(Address(rdi_array, length_offset), length_constant);
} else {
Register rbx_vminfo = rbx_temp;
load_conversion_vminfo(_masm, rbx_vminfo, rcx_amh_conversion);
__ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
__ cmpl(rbx_vminfo, Address(rdi_array, length_offset));
}
__ jcc(Assembler::notEqual, bad_array_length);
@ -2427,9 +2441,9 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
// 'stack_move' is negative number of words to insert
// This number already accounts for elem_slots.
Register rdi_stack_move = rdi_temp;
load_stack_move(_masm, rdi_stack_move, rcx_recv, true);
__ cmpptr(rdi_stack_move, 0);
Register rsi_stack_move = rsi_temp;
load_stack_move(_masm, rsi_stack_move, rcx_recv, true);
__ cmpptr(rsi_stack_move, 0);
assert(stack_move_unit() < 0, "else change this comparison");
__ jcc(Assembler::less, L_insert_arg_space);
__ jcc(Assembler::equal, L_copy_args);
@ -2440,12 +2454,12 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ jmp(L_args_done); // no spreading to do
__ BIND(L_insert_arg_space);
// come here in the usual case, stack_move < 0 (2 or more spread arguments)
Register rsi_temp = rsi_array; // spill this
insert_arg_slots(_masm, rdi_stack_move,
rax_argslot, rbx_temp, rsi_temp);
Register rdi_temp = rdi_array; // spill this
insert_arg_slots(_masm, rsi_stack_move,
rax_argslot, rbx_temp, rdi_temp);
// reload the array since rsi was killed
// reload from rdx_argslot_limit since rax_argslot is now decremented
__ movptr(rsi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
__ movptr(rdi_array, Address(rdx_argslot_limit, -Interpreter::stackElementSize));
} else if (length_constant >= 1) {
int new_slots = (length_constant * elem_slots) - array_slots;
insert_arg_slots(_masm, new_slots * stack_move_unit(),
@ -2468,16 +2482,16 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (length_constant == -1) {
// [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
// Array element [0] goes at rdx_argslot_limit[-wordSize].
Register rsi_source = rsi_array;
__ lea(rsi_source, Address(rsi_array, elem0_offset));
Register rdi_source = rdi_array;
__ lea(rdi_source, Address(rdi_array, elem0_offset));
Register rdx_fill_ptr = rdx_argslot_limit;
Label loop;
__ BIND(loop);
__ addptr(rdx_fill_ptr, -Interpreter::stackElementSize * elem_slots);
move_typed_arg(_masm, elem_type, true,
Address(rdx_fill_ptr, 0), Address(rsi_source, 0),
rbx_temp, rdi_temp);
__ addptr(rsi_source, type2aelembytes(elem_type));
Address(rdx_fill_ptr, 0), Address(rdi_source, 0),
rbx_temp, rsi_temp);
__ addptr(rdi_source, type2aelembytes(elem_type));
__ cmpptr(rdx_fill_ptr, rax_argslot);
__ jcc(Assembler::above, loop);
} else if (length_constant == 0) {
@ -2488,8 +2502,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
for (int index = 0; index < length_constant; index++) {
slot_offset -= Interpreter::stackElementSize * elem_slots; // fill backward
move_typed_arg(_masm, elem_type, true,
Address(rax_argslot, slot_offset), Address(rsi_array, elem_offset),
rbx_temp, rdi_temp);
Address(rax_argslot, slot_offset), Address(rdi_array, elem_offset),
rbx_temp, rsi_temp);
elem_offset += type2aelembytes(elem_type);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1091,12 +1091,238 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
}
}
static void save_or_restore_arguments(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMap* map,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
// if map is non-NULL then the code should store the values,
// otherwise it should load them.
int handle_index = 0;
// Save down double word values first
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
if (in_regs[i].first()->is_Register() && in_sig_bt[i] == T_LONG) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movl(Address(rsp, offset), in_regs[i].first()->as_Register());
if (in_regs[i].second()->is_Register()) {
__ movl(Address(rsp, offset + 4), in_regs[i].second()->as_Register());
}
} else {
__ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
if (in_regs[i].second()->is_Register()) {
__ movl(in_regs[i].second()->as_Register(), Address(rsp, offset + 4));
}
}
}
}
// Save or restore single word registers
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
map->set_oop(VMRegImpl::stack2reg(slot));
}
// Value is in an input register; we must flush it to the stack
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
if (map != NULL) {
__ movptr(Address(rsp, offset), reg);
} else {
__ movptr(reg, Address(rsp, offset));
}
break;
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (map != NULL) {
__ movl(Address(rsp, offset), reg);
} else {
__ movl(reg, Address(rsp, offset));
}
break;
case T_OBJECT:
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
if (in_sig_bt[i] == T_FLOAT) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
} else if (in_regs[i].first()->is_stack()) {
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
}
}
}
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
Register thread,
int stack_slots,
int total_c_args,
int total_in_args,
int arg_save_area,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
address the_pc = __ pc();
oop_maps->add_gc_map( __ offset(), map);
__ set_last_Java_frame(thread, rsp, noreg, the_pc);
__ block_comment("block_for_jni_critical");
__ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
__ increment(rsp, wordSize);
__ get_thread(thread);
__ reset_last_Java_frame(thread, false, true);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
// Stress register saving
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
// Destroy argument registers
for (int i = 0; i < total_in_args - 1; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
__ xorptr(reg, reg);
} else if (in_regs[i].first()->is_XMMRegister()) {
__ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
} else if (in_regs[i].first()->is_stack()) {
// Nothing to do
} else {
ShouldNotReachHere();
}
if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
i++;
}
}
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
}
#endif
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
Register tmp_reg = rax;
assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
"possible collision");
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");
// Pass the length, ptr pair
Label is_null, done;
VMRegPair tmp(tmp_reg->as_VMReg());
if (reg.first()->is_stack()) {
// Load the arg up from the stack
simple_move32(masm, reg, tmp);
reg = tmp;
}
__ testptr(reg.first()->as_Register(), reg.first()->as_Register());
__ jccb(Assembler::equal, is_null);
__ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
simple_move32(masm, tmp, body_arg);
// load the length relative to the body.
__ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
arrayOopDesc::base_offset_in_bytes(in_elem_type)));
simple_move32(masm, tmp, length_arg);
__ jmpb(done);
__ bind(is_null);
// Pass zeros
__ xorptr(tmp_reg, tmp_reg);
simple_move32(masm, tmp, body_arg);
simple_move32(masm, tmp, length_arg);
__ bind(done);
}
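// Concrete effect (illustrative sketch, not additional emitted code): for
// a non-null byte[] argument held in a register, the code above emits
// roughly
//   test reg, reg ; jcc equal, is_null
//   lea  rax, [reg + arrayOopDesc::base_offset_in_bytes(T_BYTE)]  -> body_arg
//   movl rax, [rax + (length_offset - base_offset)]               -> length_arg
// while the is_null path zeros rax and stores it into both outgoing slots.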
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear down of the JNI handle
// block and the check for pending exceptions, because it's impossible
// for them to be thrown.
//
// They are roughly structured like this:
// if (GC_locker::needs_gc())
// SharedRuntime::block_for_jni_critical();
// transition to thread_in_native
// unpack array arguments and call native entry point
// check for safepoint in progress
// check if any thread suspend flags are set
// call into JVM and possibly unlock the JNI critical
// if a GC was suppressed while in the critical native.
// transition back to thread_in_Java
// return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
int compile_id,
@ -1105,6 +1331,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
native_func = method->native_function();
is_critical_native = false;
}
assert(native_func != NULL, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
@ -1115,30 +1348,72 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
int total_c_args = total_in_args + 1;
if (method->is_static()) {
total_c_args++;
int total_c_args = total_in_args;
if (!is_critical_native) {
total_c_args += 1;
if (method->is_static()) {
total_c_args++;
}
} else {
for (int i = 0; i < total_in_args; i++) {
if (in_sig_bt[i] == T_ARRAY) {
total_c_args++;
}
}
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
if (!is_critical_native) {
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
int i;
for (i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
}
} else {
Thread* THREAD = Thread::current();
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
SignatureStream ss(method->signature());
for (int i = 0; i < total_in_args ; i++ ) {
if (in_sig_bt[i] == T_ARRAY) {
// Arrays are passed as int, elem* pair
out_sig_bt[argc++] = T_INT;
out_sig_bt[argc++] = T_ADDRESS;
Symbol* atype = ss.as_symbol(CHECK_NULL);
const char* at = atype->as_C_string();
if (strlen(at) == 2) {
assert(at[0] == '[', "must be");
switch (at[1]) {
case 'B': in_elem_bt[i] = T_BYTE; break;
case 'C': in_elem_bt[i] = T_CHAR; break;
case 'D': in_elem_bt[i] = T_DOUBLE; break;
case 'F': in_elem_bt[i] = T_FLOAT; break;
case 'I': in_elem_bt[i] = T_INT; break;
case 'J': in_elem_bt[i] = T_LONG; break;
case 'S': in_elem_bt[i] = T_SHORT; break;
case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
default: ShouldNotReachHere();
}
}
} else {
out_sig_bt[argc++] = in_sig_bt[i];
in_elem_bt[i] = T_VOID;
}
if (in_sig_bt[i] != T_VOID) {
assert(in_sig_bt[i] == ss.type(), "must match");
ss.next();
}
}
}
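// Worked example (illustrative, not part of this change): for a critical
// native with Java signature (I[B)V, in_sig_bt is { T_INT, T_ARRAY } and
// the loop above produces out_sig_bt = { T_INT, T_INT, T_ADDRESS } -- the
// array widens into an (int length, elem* body) pair -- while recording
// in_elem_bt = { T_VOID, T_BYTE } so the unpacking code later knows the
// element type.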
// Now figure out where the args must be stored and how much stack space
// they require (neglecting out_preserve_stack_slots but space for storing
// the 1st six register arguments). It's weird see int_stk_helper.
//
// they require.
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
@ -1151,9 +1426,44 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area
int total_save_slots = 2 * VMRegImpl::slots_per_word; // 2 arguments passed in registers
if (is_critical_native) {
// Critical natives may have to call out so they need a save area
// for register arguments.
int double_slots = 0;
int single_slots = 0;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT: single_slots++; break;
case T_LONG: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
switch (in_sig_bt[i]) {
case T_FLOAT: single_slots++; break;
case T_DOUBLE: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
}
}
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
stack_slots = round_to(stack_slots, 2);
}
}
int oop_handle_offset = stack_slots;
stack_slots += 2*VMRegImpl::slots_per_word;
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
@ -1161,7 +1471,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int klass_offset = -1;
int lock_slot_offset = 0;
bool is_static = false;
int oop_temp_slot_offset = 0;
if (method->is_static()) {
klass_slot_offset = stack_slots;
@ -1221,7 +1530,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// First thing make an ic check to see if we should even be here
// We are free to use all registers as temps without saving them and
// restoring them except rbp,. rbp, is the only callee save register
// restoring them except rbp. rbp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
@ -1230,7 +1539,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label hit;
Label exception_pending;
__ verify_oop(receiver);
__ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
@ -1292,11 +1600,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Generate a new frame for the wrapper.
__ enter();
// -2 because return address is already present and so is saved rbp,
// -2 because return address is already present and so is saved rbp
__ subptr(rsp, stack_size - 2*wordSize);
// Frame is now completed as far a size and linkage.
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
// Calculate the difference between rsp and rbp,. We need to know it
@ -1319,7 +1626,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Compute the rbp, offset for any slots used after the jni call
int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
// We use rdi as a thread pointer because it is callee save and
// if we load it once it is usable thru the entire wrapper
@ -1332,6 +1638,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ get_thread(thread);
if (is_critical_native) {
check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
//
// We immediately shuffle the arguments so that any vm call we have to
@ -1353,7 +1663,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// vectors we have in our possession. We simply walk the java vector to
// get the source locations and the c vector to get the destinations.
int c_arg = method->is_static() ? 2 : 1 ;
int c_arg = is_critical_native ? 0 : (method->is_static() ? 2 : 1 );
// Record rsp-based slot for receiver on stack for non-static methods
int receiver_offset = -1;
@ -1373,10 +1683,16 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// We are free to use temporaries if we have to do stack to stack moves.
// All inbound args are referenced based on rbp, and all outbound args via rsp.
for (i = 0; i < total_in_args ; i++, c_arg++ ) {
for (int i = 0; i < total_in_args ; i++, c_arg++ ) {
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
break;
}
case T_OBJECT:
assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@ -1408,7 +1724,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Pre-load a static method's oop into rsi. Used both by locking code and
// the normal JNI call code.
if (method->is_static()) {
if (method->is_static() && !is_critical_native) {
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@ -1463,6 +1779,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Lock a synchronized method
if (method->is_synchronized()) {
assert(!is_critical_native, "unhandled");
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@ -1529,14 +1846,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
__ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
__ movptr(Address(rsp, 0), rdx);
if (!is_critical_native) {
__ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
__ movptr(Address(rsp, 0), rdx);
}
// Now set thread in native
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
__ call(RuntimeAddress(method->native_function()));
__ call(RuntimeAddress(native_func));
// WARNING - on Windows Java Natives use pascal calling convention and pop the
// arguments off of the stack. We could just re-adjust the stack pointer here
@ -1591,6 +1909,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
Label after_transition;
// check for safepoint operation in progress and/or pending suspend requests
{ Label Continue;
@ -1611,17 +1931,29 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
//
save_native_result(masm, ret_type, stack_slots);
__ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans)));
if (!is_critical_native) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans)));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans_and_transition)));
}
__ increment(rsp, wordSize);
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
if (is_critical_native) {
// The call above performed the transition to thread_in_Java so
// skip the transition logic below.
__ jmpb(after_transition);
}
__ bind(Continue);
}
// change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ bind(after_transition);
Label reguard;
Label reguard_done;
@ -1710,15 +2042,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ verify_oop(rax);
}
// reset handle block
__ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
// Any exception pending?
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
if (!is_critical_native) {
// reset handle block
__ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
// Any exception pending?
__ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
}
// no exception, we're almost done
@ -1829,16 +2161,18 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// BEGIN EXCEPTION PROCESSING
// Forward the exception
__ bind(exception_pending);
if (!is_critical_native) {
// Forward the exception
__ bind(exception_pending);
// remove possible return value from FPU register stack
__ empty_FPU_stack();
// remove possible return value from FPU register stack
__ empty_FPU_stack();
// pop our frame
__ leave();
// and forward the exception
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// pop our frame
__ leave();
// and forward the exception
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
}
__ flush();
@ -1851,6 +2185,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
if (is_critical_native) {
nm->set_lazy_critical_native(true);
}
return nm;
}
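To make the wrapper's contract concrete: a library opts in by exporting a second entry point alongside the standard JNI one. The sketch below is illustrative only -- the class and method names are invented, and it assumes the JavaCritical_ prefix on which the VM's lookup of critical_native_function() is based; the regular Java_ version must still exist for the interpreter and other non-critical call paths.

#include <jni.h>

// Standard JNI entry point for: static native int sum(byte[] data);
extern "C" JNIEXPORT jint JNICALL
Java_com_example_Nat_sum(JNIEnv* env, jclass, jbyteArray arr) {
  if (arr == NULL) return 0;
  jint len = env->GetArrayLength(arr);
  jbyte* body = (jbyte*) env->GetPrimitiveArrayCritical(arr, NULL);
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  env->ReleasePrimitiveArrayCritical(arr, body, JNI_ABORT);  // read-only use
  return s;
}

// Critical entry point: no JNIEnv*, no jclass, and the array arrives as
// the (length, body) pair that unpack_array_argument materializes; both
// are zero/NULL when the Java array reference is null.
extern "C" JNIEXPORT jint JNICALL
JavaCritical_com_example_Nat_sum(jint len, jbyte* body) {
  jint s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  return s;
}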

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -938,6 +938,25 @@ static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
__ movq(rax, Address(rbp, reg2offset_in(src.first())));
__ movq(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
__ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
// reg to stack
__ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
if (dst.first() != src.first()) {
__ movq(dst.first()->as_Register(), src.first()->as_Register());
}
}
}
// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
@ -1152,6 +1171,203 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
}
}
static void save_or_restore_arguments(MacroAssembler* masm,
const int stack_slots,
const int total_in_args,
const int arg_save_area,
OopMap* map,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
// if map is non-NULL then the code should store the values,
// otherwise it should load them.
int handle_index = 0;
// Save down double word first
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
if (in_regs[i].first()->is_Register() &&
(in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
int slot = handle_index * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
handle_index += 2;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
if (in_sig_bt[i] == T_ARRAY) {
map->set_oop(VMRegImpl::stack2reg(slot));
}
} else {
__ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
}
}
}
// Save or restore single word registers
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
// Value is in an input register; we must flush it to the stack
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_BOOLEAN:
case T_CHAR:
case T_BYTE:
case T_SHORT:
case T_INT:
if (map != NULL) {
__ movl(Address(rsp, offset), reg);
} else {
__ movl(reg, Address(rsp, offset));
}
break;
case T_ARRAY:
case T_LONG:
// handled above
break;
case T_OBJECT:
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
if (in_sig_bt[i] == T_FLOAT) {
int slot = handle_index++ * VMRegImpl::slots_per_word + arg_save_area;
int offset = slot * VMRegImpl::stack_slot_size;
assert(handle_index <= stack_slots, "overflow");
if (map != NULL) {
__ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
} else {
__ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
}
}
} else if (in_regs[i].first()->is_stack()) {
if (in_sig_bt[i] == T_ARRAY && map != NULL) {
int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
}
}
}
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
int stack_slots,
int total_c_args,
int total_in_args,
int arg_save_area,
OopMapSet* oop_maps,
VMRegPair* in_regs,
BasicType* in_sig_bt) {
__ block_comment("check GC_locker::needs_gc");
Label cont;
__ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
__ jcc(Assembler::equal, cont);
// Save down any incoming oops and call into the runtime to halt for a GC
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
address the_pc = __ pc();
oop_maps->add_gc_map( __ offset(), map);
__ set_last_Java_frame(rsp, noreg, the_pc);
__ block_comment("block_for_jni_critical");
__ movptr(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
__ reset_last_Java_frame(false, true);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
__ bind(cont);
#ifdef ASSERT
if (StressCriticalJNINatives) {
// Stress register saving
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, map, in_regs, in_sig_bt);
// Destroy argument registers
for (int i = 0; i < total_in_args - 1; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
__ xorptr(reg, reg);
} else if (in_regs[i].first()->is_XMMRegister()) {
__ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
} else if (in_regs[i].first()->is_stack()) {
// Nothing to do
} else {
ShouldNotReachHere();
}
if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
i++;
}
}
save_or_restore_arguments(masm, stack_slots, total_in_args,
arg_save_area, NULL, in_regs, in_sig_bt);
}
#endif
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
Register tmp_reg = rax;
assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
"possible collision");
assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
"possible collision");
// Pass the length, ptr pair
Label is_null, done;
VMRegPair tmp;
tmp.set_ptr(tmp_reg->as_VMReg());
if (reg.first()->is_stack()) {
// Load the arg up from the stack
move_ptr(masm, reg, tmp);
reg = tmp;
}
__ testptr(reg.first()->as_Register(), reg.first()->as_Register());
__ jccb(Assembler::equal, is_null);
__ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
move_ptr(masm, tmp, body_arg);
// load the length relative to the body.
__ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
arrayOopDesc::base_offset_in_bytes(in_elem_type)));
move32_64(masm, tmp, length_arg);
__ jmpb(done);
__ bind(is_null);
// Pass zeros
__ xorptr(tmp_reg, tmp_reg);
move_ptr(masm, tmp, body_arg);
move32_64(masm, tmp, length_arg);
__ bind(done);
}
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
@ -1166,10 +1382,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
// Native nmethod wrappers never take possesion of the oop arguments.
// So the caller will gc the arguments. The only thing we need an
// oopMap for is if the call is static
//
bool is_critical_native = true;
address native_func = method->critical_native_function();
if (native_func == NULL) {
native_func = method->native_function();
is_critical_native = false;
}
assert(native_func != NULL, "must have function");
// An OopMap for lock (and class if static)
OopMapSet *oop_maps = new OopMapSet();
intptr_t start = (intptr_t)__ pc();
@ -1180,27 +1400,72 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// we convert the java signature to a C signature by inserting
// the hidden arguments as arg[0] and possibly arg[1] (static method)
int total_c_args = total_in_args + 1;
if (method->is_static()) {
total_c_args++;
int total_c_args = total_in_args;
if (!is_critical_native) {
total_c_args += 1;
if (method->is_static()) {
total_c_args++;
}
} else {
for (int i = 0; i < total_in_args; i++) {
if (in_sig_bt[i] == T_ARRAY) {
total_c_args++;
}
}
}
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
BasicType* in_elem_bt = NULL;
int argc = 0;
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
if (!is_critical_native) {
out_sig_bt[argc++] = T_ADDRESS;
if (method->is_static()) {
out_sig_bt[argc++] = T_OBJECT;
}
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
for (int i = 0; i < total_in_args ; i++ ) {
out_sig_bt[argc++] = in_sig_bt[i];
}
} else {
Thread* THREAD = Thread::current();
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
SignatureStream ss(method->signature());
for (int i = 0; i < total_in_args ; i++ ) {
if (in_sig_bt[i] == T_ARRAY) {
// Arrays are passed as int, elem* pair
out_sig_bt[argc++] = T_INT;
out_sig_bt[argc++] = T_ADDRESS;
Symbol* atype = ss.as_symbol(CHECK_NULL);
const char* at = atype->as_C_string();
if (strlen(at) == 2) {
assert(at[0] == '[', "must be");
switch (at[1]) {
case 'B': in_elem_bt[i] = T_BYTE; break;
case 'C': in_elem_bt[i] = T_CHAR; break;
case 'D': in_elem_bt[i] = T_DOUBLE; break;
case 'F': in_elem_bt[i] = T_FLOAT; break;
case 'I': in_elem_bt[i] = T_INT; break;
case 'J': in_elem_bt[i] = T_LONG; break;
case 'S': in_elem_bt[i] = T_SHORT; break;
case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
default: ShouldNotReachHere();
}
}
} else {
out_sig_bt[argc++] = in_sig_bt[i];
in_elem_bt[i] = T_VOID;
}
if (in_sig_bt[i] != T_VOID) {
assert(in_sig_bt[i] == ss.type(), "must match");
ss.next();
}
}
}
// Now figure out where the args must be stored and how much stack space
// they require.
//
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
@ -1213,13 +1478,47 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
// Now the space for the inbound oop handle area
int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
if (is_critical_native) {
// Critical natives may have to call out so they need a save area
// for register arguments.
int double_slots = 0;
int single_slots = 0;
for ( int i = 0; i < total_in_args; i++) {
if (in_regs[i].first()->is_Register()) {
const Register reg = in_regs[i].first()->as_Register();
switch (in_sig_bt[i]) {
case T_ARRAY:
case T_BOOLEAN:
case T_BYTE:
case T_SHORT:
case T_CHAR:
case T_INT: single_slots++; break;
case T_LONG: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_XMMRegister()) {
switch (in_sig_bt[i]) {
case T_FLOAT: single_slots++; break;
case T_DOUBLE: double_slots++; break;
default: ShouldNotReachHere();
}
} else if (in_regs[i].first()->is_FloatRegister()) {
ShouldNotReachHere();
}
}
total_save_slots = double_slots * 2 + single_slots;
// align the save area
if (double_slots != 0) {
stack_slots = round_to(stack_slots, 2);
}
}
int oop_handle_offset = stack_slots;
stack_slots += 6*VMRegImpl::slots_per_word;
stack_slots += total_save_slots;
// Now any space we need for handlizing a klass if static method
int oop_temp_slot_offset = 0;
int klass_slot_offset = 0;
int klass_offset = -1;
int lock_slot_offset = 0;
@ -1272,7 +1571,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
int stack_size = stack_slots * VMRegImpl::stack_slot_size;
// First thing make an ic check to see if we should even be here
// We are free to use all registers as temps without saving them and
@ -1283,22 +1581,22 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const Register ic_reg = rax;
const Register receiver = j_rarg0;
Label ok;
Label hit;
Label exception_pending;
assert_different_registers(ic_reg, receiver, rscratch1);
__ verify_oop(receiver);
__ load_klass(rscratch1, receiver);
__ cmpq(ic_reg, rscratch1);
__ jcc(Assembler::equal, ok);
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
// Verified entry point must be aligned
__ align(8);
__ bind(hit);
int vep_offset = ((intptr_t)__ pc()) - start;
// The instruction at the verified entry point must be 5 bytes or longer
@ -1319,9 +1617,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// -2 because return address is already present and so is saved rbp
__ subptr(rsp, stack_size - 2*wordSize);
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
// Frame is now completed as far as size and linkage.
int frame_complete = ((intptr_t)__ pc()) - start;
#ifdef ASSERT
{
@ -1341,7 +1638,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const Register oop_handle_reg = r14;
if (is_critical_native) {
check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
//
// We immediately shuffle the arguments so that any vm call we have to
@ -1390,9 +1690,36 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#endif /* ASSERT */
if (is_critical_native) {
// The mapping of Java and C arguments passed in registers is
// rotated by one, which helps when passing arguments to a regular
// Java method but for critical natives creates a cycle which
// can cause arguments to be killed before they are used. Break
// the cycle by moving the first argument into a temporary
// register.
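// Illustration (assuming the non-Windows register assignment, where
// j_rarg0..j_rarg5 = rsi, rdx, rcx, r8, r9, rdi and c_rarg0..c_rarg5 =
// rdi, rsi, rdx, rcx, r8, r9): copying Java arg0 rsi -> C arg0 rdi
// clobbers rdi, which still holds Java arg5, before it is read. Parking
// rdi in rbx first breaks that dependence cycle.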
for (int i = 0; i < total_c_args; i++) {
if (in_regs[i].first()->is_Register() &&
in_regs[i].first()->as_Register() == rdi) {
__ mov(rbx, rdi);
in_regs[i].set1(rbx->as_VMReg());
}
}
}
// This may iterate in two different directions depending on the
// kind of native it is. The reason is that for regular JNI natives
// the incoming and outgoing registers are offset upwards and for
// critical natives they are offset down.
int c_arg = total_c_args - 1;
for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
int stride = -1;
int init = total_in_args - 1;
if (is_critical_native) {
// stride forwards
c_arg = 0;
stride = 1;
init = 0;
}
for (int i = init, count = 0; count < total_in_args; i += stride, c_arg += stride, count++ ) {
#ifdef ASSERT
if (in_regs[i].first()->is_Register()) {
assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@ -1407,7 +1734,20 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#endif /* ASSERT */
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
#ifdef ASSERT
if (out_regs[c_arg].first()->is_Register()) {
reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
} else if (out_regs[c_arg].first()->is_XMMRegister()) {
freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
}
#endif
break;
}
case T_OBJECT:
assert(!is_critical_native, "no oop arguments");
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
((i == 0) && (!is_static)),
&receiver_offset);
@ -1443,7 +1783,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Pre-load a static method's oop into r14. Used both by locking code and
// the normal JNI call code.
if (method->is_static()) {
if (method->is_static() && !is_critical_native) {
// load oop into a register
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
@ -1509,6 +1849,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label lock_done;
if (method->is_synchronized()) {
assert(!is_critical_native, "unhandled");
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
@ -1572,13 +1913,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
__ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
if (!is_critical_native) {
__ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
}
// Now set thread in native
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
__ call(RuntimeAddress(method->native_function()));
__ call(RuntimeAddress(native_func));
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
@ -1634,6 +1976,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
}
Label after_transition;
// check for safepoint operation in progress and/or pending suspend requests
{
@ -1659,16 +2002,28 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
if (!is_critical_native) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
} else {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
}
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
if (is_critical_native) {
// The call above performed the transition to thread_in_Java so
// skip the transition logic below.
__ jmpb(after_transition);
}
__ bind(Continue);
}
// change thread state
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ bind(after_transition);
Label reguard;
Label reguard_done;
@ -1746,17 +2101,21 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ verify_oop(rax);
}
// reset handle block
__ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
if (!is_critical_native) {
// reset handle block
__ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
__ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
}
// pop our frame
__ leave();
// Any exception pending?
__ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
if (!is_critical_native) {
// Any exception pending?
__ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
}
// Return
@ -1764,12 +2123,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Unexpected paths are out of line and go here
// forward the exception
__ bind(exception_pending);
// and forward the exception
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
if (!is_critical_native) {
// forward the exception
__ bind(exception_pending);
// and forward the exception
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
}
// Slow path locking & unlocking
if (method->is_synchronized()) {
@ -1876,6 +2236,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
if (is_critical_native) {
nm->set_lazy_critical_native(true);
}
return nm;
}

View File

@ -2978,7 +2978,9 @@ class StubGenerator: public StubCodeGenerator {
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(rsp, rbp, NULL);
address the_pc = __ pc();
__ set_last_Java_frame(rsp, rbp, the_pc);
__ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
// Call runtime
if (arg1 != noreg) {
@ -2997,7 +2999,7 @@ class StubGenerator: public StubCodeGenerator {
oop_maps->add_gc_map(__ pc() - start, map);
__ reset_last_Java_frame(true, false);
__ reset_last_Java_frame(true, true);
__ leave(); // required for proper stackwalking of RuntimeStub frame

View File

@ -4007,7 +4007,6 @@ int set_lwp_class_and_priority(int ThreadID, int lwpid,
iaInfo->ia_uprilim = cur_class == new_class
? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
iaInfo->ia_mode = IA_NOCHANGE;
iaInfo->ia_nice = cur_class == new_class ? IA_NOCHANGE : NZERO;
if (ThreadPriorityVerbose) {
tty->print_cr("IA: [%d...%d] %d->%d\n",
iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);

View File

@ -2088,7 +2088,6 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#elif _M_AMD64
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Rip;
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Rax == min_jint, "unexpected idiv exception");
@ -2100,7 +2099,6 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
#else
PCONTEXT ctx = exceptionInfo->ContextRecord;
address pc = (address)ctx->Eip;
NOT_PRODUCT(Events::log("idiv overflow exception at " INTPTR_FORMAT , pc));
assert(pc[0] == 0xF7, "not an idiv opcode");
assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
assert(ctx->Eax == min_jint, "unexpected idiv exception");
@ -5336,4 +5334,3 @@ BOOL os::Advapi32Dll::AdvapiAvailable() {
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -362,7 +362,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
}
intptr_t* _get_previous_fp() {
#if defined(SPARC_WORKS) || defined(__clang__)
#if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
register intptr_t **ebp;
__asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
#else

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -243,6 +243,7 @@ class BuildConfig {
sysDefines.add("_WINDOWS");
sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
sysDefines.add("INCLUDE_TRACE");
sysDefines.add("_JNI_IMPLEMENTATION_");
if (vars.get("PlatformName").equals("Win32")) {
sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1592,6 +1592,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// this happened while running the JCK invokevirtual tests under doit. TKR
ciMethod* cha_monomorphic_target = NULL;
ciMethod* exact_target = NULL;
Value better_receiver = NULL;
if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
!target->is_method_handle_invoke()) {
Value receiver = NULL;
@ -1653,6 +1654,18 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
ciInstanceKlass* singleton = NULL;
if (target->holder()->nof_implementors() == 1) {
singleton = target->holder()->implementor(0);
assert(holder->is_interface(), "invokeinterface to non interface?");
ciInstanceKlass* decl_interface = (ciInstanceKlass*)holder;
// the number of implementors for decl_interface is less than or
// equal to the number of implementors for target->holder(), so
// if the number of implementors of target->holder() == 1 then
// the number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) {
singleton = NULL;
}
}
if (singleton) {
cha_monomorphic_target = target->find_monomorphic_target(calling_klass, target->holder(), singleton);
@ -1667,7 +1680,9 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception());
c->set_incompatible_class_change_check();
c->set_direct_compare(klass->is_final());
append_split(c);
// pass the result of the checkcast so that the compiler has
// more accurate type info in the inlinee
better_receiver = append_split(c);
}
}
}
@ -1709,7 +1724,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
}
if (!success) {
// static binding => check if callee is ok
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL));
success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), better_receiver);
}
CHECK_BAILOUT();
@ -3034,7 +3049,7 @@ int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const {
}
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, Value receiver) {
// Clear out any existing inline bailout condition
clear_inline_bailout();
@ -3056,7 +3071,7 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
} else if (callee->is_abstract()) {
INLINE_BAILOUT("abstract")
} else {
return try_inline_full(callee, holder_known);
return try_inline_full(callee, holder_known, NULL, receiver);
}
}
@ -3405,7 +3420,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool
}
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block) {
bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
@ -3541,6 +3556,9 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
Value arg = caller_state->stack_at_inc(i);
// NOTE: take base() of arg->type() to avoid problems storing
// constants
if (receiver != NULL && par_no == 0) {
arg = receiver;
}
store_local(callee_state, arg, arg->type()->base(), par_no);
}
}
@ -3683,56 +3701,61 @@ bool GraphBuilder::for_method_handle_inline(ciMethod* callee) {
// Get the two MethodHandle inputs from the Phi.
Value op1 = phi->operand_at(0);
Value op2 = phi->operand_at(1);
ciMethodHandle* mh1 = op1->type()->as_ObjectType()->constant_value()->as_method_handle();
ciMethodHandle* mh2 = op2->type()->as_ObjectType()->constant_value()->as_method_handle();
ObjectType* op1type = op1->type()->as_ObjectType();
ObjectType* op2type = op2->type()->as_ObjectType();
// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
mh1->set_callee(callee);
mh1->set_caller(method());
mh2->set_callee(callee);
mh2->set_caller(method());
if (op1type->is_constant() && op2type->is_constant()) {
ciMethodHandle* mh1 = op1type->constant_value()->as_method_handle();
ciMethodHandle* mh2 = op2type->constant_value()->as_method_handle();
// Get adapters for the MethodHandles.
ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
// Set the callee to have access to the class and signature in
// the MethodHandleCompiler.
mh1->set_callee(callee);
mh1->set_caller(method());
mh2->set_callee(callee);
mh2->set_caller(method());
if (mh1_adapter != NULL && mh2_adapter != NULL) {
set_inline_cleanup_info();
// Get adapters for the MethodHandles.
ciMethod* mh1_adapter = mh1->get_method_handle_adapter();
ciMethod* mh2_adapter = mh2->get_method_handle_adapter();
// Build the If guard
BlockBegin* one = new BlockBegin(next_bci());
BlockBegin* two = new BlockBegin(next_bci());
BlockBegin* end = new BlockBegin(next_bci());
Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
block()->set_end(iff->as_BlockEnd());
if (mh1_adapter != NULL && mh2_adapter != NULL) {
set_inline_cleanup_info();
// Connect up the states
one->merge(block()->end()->state());
two->merge(block()->end()->state());
// Build the If guard
BlockBegin* one = new BlockBegin(next_bci());
BlockBegin* two = new BlockBegin(next_bci());
BlockBegin* end = new BlockBegin(next_bci());
Instruction* iff = append(new If(phi, If::eql, false, op1, one, two, NULL, false));
block()->set_end(iff->as_BlockEnd());
// Save the state for the second inlinee
ValueStack* state_before = copy_state_before();
// Connect up the states
one->merge(block()->end()->state());
two->merge(block()->end()->state());
// Parse first adapter
_last = _block = one;
if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
// Save the state for the second inlinee
ValueStack* state_before = copy_state_before();
// Parse first adapter
_last = _block = one;
if (!try_inline_full(mh1_adapter, /*holder_known=*/ true, end, NULL)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}
// Parse second adapter
_last = _block = two;
_state = state_before;
if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end, NULL)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}
connect_to_end(end);
return true;
}
// Parse second adapter
_last = _block = two;
_state = state_before;
if (!try_inline_full(mh2_adapter, /*holder_known=*/ true, end)) {
restore_inline_cleanup_info();
block()->clear_end(); // remove appended iff
return false;
}
connect_to_end(end);
return true;
}
}
}

View File

@ -337,9 +337,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);
// inliners
bool try_inline( ciMethod* callee, bool holder_known);
bool try_inline( ciMethod* callee, bool holder_known, Value receiver = NULL);
bool try_inline_intrinsics(ciMethod* callee);
bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block = NULL);
bool try_inline_full( ciMethod* callee, bool holder_known, BlockBegin* cont_block, Value receiver);
bool try_inline_jsr(int jsr_dest_bci);
// JSR 292 support

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -597,7 +597,6 @@ address Runtime1::exception_handler_for_pc(JavaThread* thread) {
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
NOT_PRODUCT(_throw_range_check_exception_count++;)
Events::log("throw_range_check");
char message[jintAsStringSize];
sprintf(message, "%d", index);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
@ -606,7 +605,6 @@ JRT_END
JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
NOT_PRODUCT(_throw_index_exception_count++;)
Events::log("throw_index");
char message[16];
sprintf(message, "%d", index);
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
@ -804,11 +802,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Note also that in the presence of inlining it is not guaranteed
// that caller_method() == caller_code->method()
int bci = vfst.bci();
Events::log("patch_code @ " INTPTR_FORMAT , caller_frame.pc());
Bytecodes::Code code = caller_method()->java_code_at(bci);
#ifndef PRODUCT

View File

@ -125,6 +125,7 @@ Value ValueMap::find_insert(Value x) {
// otherwise it is possible that they are not evaluated
f->pin(Instruction::PinGlobalValueNumbering);
}
assert(x->type()->tag() == f->type()->tag(), "should have same type");
return f;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -359,7 +359,7 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
case Bytecodes::_nop:
break;
case Bytecodes::_aconst_null:
state.apush(empty_map);
state.apush(unknown_obj);
break;
case Bytecodes::_iconst_m1:
case Bytecodes::_iconst_0:
@ -392,6 +392,8 @@ void BCEscapeAnalyzer::iterate_one_block(ciBlock *blk, StateInfo &state, Growabl
if (tag.is_long() || tag.is_double()) {
// Only longs and doubles use 2 stack slots.
state.lpush();
} else if (tag.basic_type() == T_OBJECT) {
state.apush(unknown_obj);
} else {
state.spush();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -284,6 +284,20 @@ public:
// Return state of appropriate compilability
int compilable() { return _compilable; }
const char* retry_message() const {
switch (_compilable) {
case ciEnv::MethodCompilable_not_at_tier:
return "retry at different tier";
case ciEnv::MethodCompilable_never:
return "not retryable";
case ciEnv::MethodCompilable:
return NULL;
default:
ShouldNotReachHere();
return NULL;
}
}
bool break_at_compile() { return _break_at_compile; }
void set_break_at_compile(bool z) { _break_at_compile = z; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -618,7 +618,8 @@ void Dictionary::print() {
ResourceMark rm;
HandleMark hm;
tty->print_cr("Java system dictionary (classes=%d)", number_of_entries());
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,6 +64,9 @@ SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
int SystemDictionary::_number_of_modifications = 0;
int SystemDictionary::_sdgeneration = 0;
const int SystemDictionary::_primelist[_prime_array_size] = {1009,2017,4049,5051,10103,
20201,40423,99991};
oop SystemDictionary::_system_loader_lock_obj = NULL;
@ -1178,8 +1181,8 @@ void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length,
klassOop SystemDictionary::find_shared_class(Symbol* class_name) {
if (shared_dictionary() != NULL) {
unsigned int d_hash = dictionary()->compute_hash(class_name, Handle());
int d_index = dictionary()->hash_to_index(d_hash);
unsigned int d_hash = shared_dictionary()->compute_hash(class_name, Handle());
int d_index = shared_dictionary()->hash_to_index(d_hash);
return shared_dictionary()->find_shared_class(d_index, d_hash, class_name);
} else {
return NULL;
@ -1750,7 +1753,21 @@ void SystemDictionary::placeholders_do(OopClosure* blk) {
placeholders()->oops_do(blk);
}
// Calculate a "good" system dictionary size based
// on the predicted or current loaded class count.
int SystemDictionary::calculate_systemdictionary_size(int classcount) {
int newsize = _old_default_sdsize;
if ((classcount > 0) && !DumpSharedSpaces) {
int desiredsize = classcount/_average_depth_goal;
for (newsize = _primelist[_sdgeneration]; _sdgeneration < _prime_array_size -1;
newsize = _primelist[++_sdgeneration]) {
if (desiredsize <= newsize) {
break;
}
}
}
return newsize;
}
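// Worked example (illustrative numbers): with _average_depth_goal == 3, a
// predicted count of 35000 classes gives desiredsize == 11666; walking
// _primelist {1009, 2017, 4049, 5051, 10103, 20201, 40423, 99991} stops at
// 20201, the first prime >= desiredsize. Since _sdgeneration is a static
// cursor into the list, later calls can only keep or grow the chosen size.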
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
bool result = dictionary()->do_unloading(is_alive);
constraints()->purge_loader_constraints(is_alive);
@ -1873,7 +1890,8 @@ void SystemDictionary::initialize(TRAPS) {
// Allocate arrays
assert(dictionary() == NULL,
"SystemDictionary should only be initialized once");
_dictionary = new Dictionary(_nof_buckets);
_sdgeneration = 0;
_dictionary = new Dictionary(calculate_systemdictionary_size(PredictedLoadedClassCount));
_placeholders = new PlaceholderTable(_nof_buckets);
_number_of_modifications = 0;
_loader_constraints = new LoaderConstraintTable(_loader_constraint_size);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -346,6 +346,8 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
static int calculate_systemdictionary_size(int loadedclasses);
// Applies "f->do_oop" to all root oops in the system dictionary.
static void oops_do(OopClosure* f);
@ -538,12 +540,20 @@ public:
_loader_constraint_size = 107, // number of entries in constraint table
_resolution_error_size = 107, // number of entries in resolution error table
_invoke_method_size = 139, // number of entries in invoke method table
_nof_buckets = 1009 // number of buckets in hash table
_nof_buckets = 1009, // number of buckets in hash table for placeholders
_old_default_sdsize = 1009, // backward compat for system dictionary size
_prime_array_size = 8, // array of primes for system dictionary size
_average_depth_goal = 3 // goal for lookup length
};
// Static variables
// hashtable sizes for system dictionary to allow growth
// prime numbers for system dictionary size
static int _sdgeneration;
static const int _primelist[_prime_array_size];
// Hashtable holding loaded classes.
static Dictionary* _dictionary;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -165,7 +165,6 @@ void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
instruction_address(), method->print_value_string(), entry);
}
Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
// We can't check this anymore. With lazy deopt we could have already
// cleaned this IC entry before we even return. This is possible if
// we ran out of space in the inline cache buffer trying to do the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -462,6 +462,7 @@ void nmethod::init_defaults() {
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
_marked_for_deoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
@ -704,7 +705,6 @@ nmethod::nmethod(
xtty->tail("print_native_nmethod");
}
}
Events::log("Create nmethod " INTPTR_FORMAT, this);
}
// For dtrace wrappers
@ -781,7 +781,6 @@ nmethod::nmethod(
xtty->tail("print_dtrace_nmethod");
}
}
Events::log("Create nmethod " INTPTR_FORMAT, this);
}
#endif // def HAVE_DTRACE_H
@ -889,13 +888,6 @@ nmethod::nmethod(
if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
print_nmethod(printnmethods);
}
// Note: Do not verify in here as the CodeCache_lock is
// taken which would conflict with the CompiledIC_lock
// which taken during the verification of call sites.
// (was bug - gri 10/25/99)
Events::log("Create nmethod " INTPTR_FORMAT, this);
}
@ -1386,7 +1378,7 @@ void nmethod::flush() {
assert_locked_or_safepoint(CodeCache_lock);
// completely deallocate this method
EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -175,6 +175,7 @@ class nmethod : public CodeBlob {
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
@ -430,7 +431,10 @@ class nmethod : public CodeBlob {
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }

bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
int comp_level() const { return _comp_level; }

View File

@ -44,6 +44,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
@ -189,6 +190,43 @@ CompileTask* CompileBroker::_task_free_list = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
class CompilationLog : public StringEventLog {
public:
CompilationLog() : StringEventLog("Compilation events") {
}
void log_compile(JavaThread* thread, CompileTask* task) {
StringLogMessage lm;
stringStream msg = lm.stream();
// msg.time_stamp().update_to(tty->time_stamp().ticks());
task->print_compilation(&msg, true);
log(thread, "%s", (const char*)lm);
}
void log_nmethod(JavaThread* thread, nmethod* nm) {
log(thread, "nmethod " INTPTR_FORMAT " code ["INTPTR_FORMAT ", " INTPTR_FORMAT "]",
nm, nm->code_begin(), nm->code_end());
}
void log_failure(JavaThread* thread, CompileTask* task, const char* reason, const char* retry_message) {
StringLogMessage lm;
lm.print("%4d COMPILE SKIPPED: %s", task->compile_id(), reason);
if (retry_message != NULL) {
lm.append(" (%s)", retry_message);
}
lm.print("\n");
log(thread, "%s", (const char*)lm);
}
};
static CompilationLog* _compilation_log = NULL;
void compileBroker_init() {
if (LogEvents) {
_compilation_log = new CompilationLog();
}
}
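
CompilationLog builds on StringEventLog, a bounded in-memory log. As a rough sketch of the underlying idea (this is not HotSpot's EventLog API, just an illustration): keep a fixed-size ring of formatted messages so logging stays cheap and the most recent events remain available, for example in a crash report.

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

class RingEventLogSketch {
  std::vector<std::string> records_;
  size_t next_;
public:
  explicit RingEventLogSketch(size_t capacity) : records_(capacity), next_(0) {}
  void log(const char* fmt, ...) {
    char buf[256];
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    records_[next_ % records_.size()] = buf;  // overwrite the oldest entry
    next_++;
  }
  void print() const {
    for (size_t i = 0; i < records_.size(); i++)
      if (!records_[i].empty()) std::printf("%s\n", records_[i].c_str());
  }
};

int main() {
  RingEventLogSketch log(4);
  for (int id = 1; id <= 6; id++)
    log.log("%4d COMPILE SKIPPED: %s", id, "inlining too deep");
  log.print();  // only the last four events survive
  return 0;
}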
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
CompilerThread* thread = CompilerThread::current();
thread->set_task(task);
@ -326,8 +364,12 @@ void CompileTask::print_line() {
// ------------------------------------------------------------------
// CompileTask::print_compilation_impl
void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method, int osr_bci, bool is_blocking, const char* msg) {
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
bool is_osr_method, int osr_bci, bool is_blocking,
const char* msg, bool short_form) {
if (!short_form) {
st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp
}
st->print("%4d ", compile_id); // print compilation number
// For unloaded methods the transition to zombie occurs after the
@ -370,7 +412,9 @@ void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int
if (msg != NULL) {
st->print(" %s", msg);
}
st->cr();
if (!short_form) {
st->cr();
}
}
// ------------------------------------------------------------------
@ -426,12 +470,12 @@ void CompileTask::print_inline_indent(int inline_level, outputStream* st) {
// ------------------------------------------------------------------
// CompileTask::print_compilation
void CompileTask::print_compilation(outputStream* st) {
void CompileTask::print_compilation(outputStream* st, bool short_form) {
oop rem = JNIHandles::resolve(method_handle());
assert(rem != NULL && rem->is_method(), "must be");
methodOop method = (methodOop) rem;
bool is_osr_method = osr_bci() != InvocationEntryBci;
print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking());
print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking(), NULL, short_form);
}
// ------------------------------------------------------------------
@ -1649,6 +1693,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
CompilerThread* thread = CompilerThread::current();
ResourceMark rm(thread);
if (LogEvents) {
_compilation_log->log_compile(thread, task);
}
// Common flags.
uint compile_id = task->compile_id();
int osr_bci = task->osr_bci();
@ -1717,22 +1765,30 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
}
// Copy this bit to the enclosing block:
compilable = ci_env.compilable();
if (ci_env.failing()) {
// Copy this bit to the enclosing block:
compilable = ci_env.compilable();
const char* retry_message = ci_env.retry_message();
if (_compilation_log != NULL) {
_compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
}
if (PrintCompilation) {
const char* reason = ci_env.failure_reason();
if (compilable == ciEnv::MethodCompilable_not_at_tier) {
tty->print_cr("%4d COMPILE SKIPPED: %s (retry at different tier)", compile_id, reason);
} else if (compilable == ciEnv::MethodCompilable_never) {
tty->print_cr("%4d COMPILE SKIPPED: %s (not retryable)", compile_id, reason);
} else if (compilable == ciEnv::MethodCompilable) {
tty->print_cr("%4d COMPILE SKIPPED: %s", compile_id, reason);
tty->print("%4d COMPILE SKIPPED: %s", compile_id, ci_env.failure_reason());
if (retry_message != NULL) {
tty->print(" (%s)", retry_message);
}
tty->cr();
}
} else {
task->mark_success();
task->set_num_inlined_bytecodes(ci_env.num_inlined_bytecodes());
if (_compilation_log != NULL) {
nmethod* code = task->code();
if (code != NULL) {
_compilation_log->log_nmethod(thread, code);
}
}
}
}
pop_jni_handle_block();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,12 +98,16 @@ class CompileTask : public CHeapObj {
void set_prev(CompileTask* prev) { _prev = prev; }
private:
static void print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false, const char* msg = NULL);
static void print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level,
bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false,
const char* msg = NULL, bool short_form = false);
public:
void print_compilation(outputStream* st = tty);
void print_compilation(outputStream* st = tty, bool short_form = false);
static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) {
print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, msg);
print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(),
nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false,
msg);
}
static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -5594,6 +5594,7 @@ void CMSCollector::do_remark_parallel() {
GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
@ -5608,6 +5609,8 @@ void CMSCollector::do_remark_non_parallel() {
ResourceMark rm;
HandleMark hm;
GenCollectedHeap* gch = GenCollectedHeap::heap();
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
MarkRefsIntoAndScanClosure
mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
&_markStack, &_revisitStack, this,
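
ReferenceProcessorMTDiscoveryMutator is a scoped (RAII) mutator: it switches multi-threaded reference discovery off for the duration of the single-threaded remark and restores the previous setting on scope exit. A generic sketch of the idiom (example-local class, not the real mutator):

// Save a flag on construction, force a new value, restore in the destructor,
// so the override cannot leak past the block even on early return.
class ScopedFlagMutatorSketch {
  bool* _flag;
  bool  _saved;
public:
  ScopedFlagMutatorSketch(bool* flag, bool value) : _flag(flag), _saved(*flag) {
    *_flag = value;
  }
  ~ScopedFlagMutatorSketch() { *_flag = _saved; }
};

// Usage inside the non-parallel remark block:
//   ScopedFlagMutatorSketch mt(&mt_discovery_enabled, false);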

View File

@ -1238,9 +1238,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
SvcGCMarker sgcm(SvcGCMarker::FULL);
ResourceMark rm;
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
print_heap_before_gc();
HRSPhaseSetter x(HRSPhaseFullGC);
verify_region_sets_optional();
@ -1492,9 +1490,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
_hrs.verify_optional();
verify_region_sets_optional();
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
print_heap_after_gc();
g1mm()->update_sizes();
post_full_gc_dump();
@ -3560,9 +3556,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
ResourceMark rm;
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
print_heap_before_gc();
HRSPhaseSetter x(HRSPhaseEvacuation);
verify_region_sets_optional();
@ -3937,9 +3931,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
print_heap_after_gc();
g1mm()->update_sizes();
if (G1SummarizeRSetStats &&

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,7 +126,6 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace(" 1");
@ -292,7 +291,6 @@ void G1MarkSweep::mark_sweep_phase2() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");
@ -337,7 +335,6 @@ void G1MarkSweep::mark_sweep_phase3() {
Generation* pg = g1h->perm_gen();
// Adjust the pointers to reflect the new locations
EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("3");
@ -402,7 +399,6 @@ void G1MarkSweep::mark_sweep_phase4() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
Generation* pg = g1h->perm_gen();
EventMark m("4 compact heap");
TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("4");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -132,9 +132,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
heap->print_heap_before_gc();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@ -377,9 +375,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
heap->print_heap_after_gc();
heap->post_full_gc_dump();
@ -504,7 +500,6 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
trace(" 1");
@ -563,7 +558,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
void PSMarkSweep::mark_sweep_phase2() {
EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
trace("2");
@ -608,7 +602,6 @@ static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
trace("3");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -983,9 +983,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// We need to track unique mark sweep invocations as well.
_total_invocations++;
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
heap->print_heap_before_gc();
// Fill in TLABs
heap->accumulate_statistics_all_tlabs();
@ -1838,7 +1836,6 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
EventMark m("2 summarize");
TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
// trace("2");
@ -2237,9 +2234,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
collection_exit.update();
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
heap->print_heap_after_gc();
if (PrintGCTaskTimeStamps) {
gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
INT64_FORMAT,
@ -2352,7 +2347,6 @@ GCTaskManager* const PSParallelCompact::gc_task_manager() {
void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction) {
// Recursively traverse all live objects and mark them
EventMark m("1 mark object");
TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
ParallelScavengeHeap* heap = gc_heap();
@ -2438,7 +2432,6 @@ static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations
EventMark m("3 adjust roots");
TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
// General strong roots.
@ -2469,7 +2462,6 @@ void PSParallelCompact::adjust_roots() {
}
void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
EventMark m("4 compact perm");
TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
// trace("4");
@ -2647,7 +2639,6 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
}
void PSParallelCompact::compact() {
EventMark m("5 compact");
// trace("5");
TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
@ -3502,4 +3493,3 @@ void PSParallelCompact::compact_prologue() {
_updated_int_array_klass_obj = (klassOop)
summary_data().calc_new_pointer(Universe::intArrayKlassObj());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -295,9 +295,7 @@ bool PSScavenge::invoke_no_policy() {
heap->record_gen_tops_before_GC();
}
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
heap->print_heap_before_gc();
assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
@ -643,9 +641,7 @@ bool PSScavenge::invoke_no_policy() {
Universe::verify(false);
}
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
heap->print_heap_after_gc();
if (ZapUnusedHeapArea) {
young_gen->eden_space()->check_mangled_unused_area_complete();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,31 @@ int CollectedHeap::_fire_out_of_memory_count = 0;
size_t CollectedHeap::_filler_array_max_size = 0;
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
st->print_cr("GC heap %s", m.is_before ? "before" : "after");
st->print_raw(m);
}
void GCHeapLog::log_heap(bool before) {
if (!should_log()) {
return;
}
jlong timestamp = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
int index = compute_log_index();
_records[index].thread = NULL; // It's the GC thread so it's not that interesting.
_records[index].timestamp = timestamp;
_records[index].data.is_before = before;
stringStream st(_records[index].data.buffer(), _records[index].data.size());
if (before) {
Universe::print_heap_before_gc(&st);
} else {
Universe::print_heap_after_gc(&st);
}
}
// Memory state functions.
@ -81,6 +106,12 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0)
80, GCCause::to_string(_gc_lastcause), CHECK);
}
_defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
// Create the ring log
if (LogEvents) {
_gc_heap_log = new GCHeapLog();
} else {
_gc_heap_log = NULL;
}
}
void CollectedHeap::pre_initialize() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"
// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
@ -43,6 +44,29 @@ class AdaptiveSizePolicy;
class Thread;
class CollectorPolicy;
class GCMessage : public FormatBuffer<1024> {
public:
bool is_before;
public:
GCMessage() {}
};
class GCHeapLog : public EventLogBase<GCMessage> {
private:
void log_heap(bool before);
public:
GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}
void log_heap_before() {
log_heap(true);
}
void log_heap_after() {
log_heap(false);
}
};
//
// CollectedHeap
// SharedHeap
@ -62,6 +86,8 @@ class CollectedHeap : public CHeapObj {
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
GCHeapLog* _gc_heap_log;
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
bool _defer_initial_card_mark;
@ -618,6 +644,27 @@ class CollectedHeap : public CHeapObj {
// Default implementation does nothing.
virtual void print_tracing_info() const = 0;
// If PrintHeapAtGC is set call the appropriate routine.
void print_heap_before_gc() {
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
}
if (_gc_heap_log != NULL) {
_gc_heap_log->log_heap_before();
}
}
void print_heap_after_gc() {
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
if (_gc_heap_log != NULL) {
_gc_heap_log->log_heap_after();
}
}
// Allocate GCHeapLog during VM startup
static void initialize_heap_log();
// Heap verification
virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
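
The print_heap_before_gc/print_heap_after_gc helpers above consolidate the pattern seen throughout the GC changes in this diff: tty output stays gated by PrintHeapAtGC, while the ring log, when LogEvents created one, records every GC unconditionally. A compact sketch of the shape (example-local names):

#include <cstdio>
#include <cstddef>

struct GCHeapLogSketch {
  void log_heap(bool before) {
    // the real log_heap snapshots the heap into a ring-buffer record
    std::printf("[ring] GC heap %s\n", before ? "before" : "after");
  }
};

static bool             print_heap_at_gc = false; // stand-in for PrintHeapAtGC
static GCHeapLogSketch* gc_heap_log      = NULL;  // non-NULL when LogEvents is on

static void print_heap_before_gc_sketch() {
  if (print_heap_at_gc) std::printf("full heap dump to tty...\n");
  if (gc_heap_log != NULL) gc_heap_log->log_heap(true);
}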

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,38 +31,93 @@ volatile jint GC_locker::_jni_lock_count = 0;
volatile jint GC_locker::_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;
jlong GC_locker::_wait_begin = 0;
#ifdef ASSERT
volatile jint GC_locker::_debug_jni_lock_count = 0;
#endif
#ifdef ASSERT
void GC_locker::verify_critical_count() {
if (SafepointSynchronize::is_at_safepoint()) {
assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
int count = 0;
// Count the number of threads with critical operations in progress
for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
if (thr->in_critical()) {
count++;
}
}
if (_jni_lock_count != count) {
tty->print_cr("critical counts don't match: %d != %d", _jni_lock_count, count);
for (JavaThread* thr = Threads::first(); thr; thr = thr->next()) {
if (thr->in_critical()) {
tty->print_cr(INTPTR_FORMAT " in_critical %d", thr, thr->in_critical());
}
}
}
assert(_jni_lock_count == count, "must be equal");
}
}
#endif
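
verify_critical_count cross-checks two counters: _jni_lock_count, recomputed by walking the threads, and _debug_jni_lock_count, which debug builds bump on every enter/exit. A simplified, single-threaded sketch of the idiom (example-local names; the real counter updates are atomic):

#include <cassert>
#include <vector>

// Example-local stand-in: each thread tracks its own critical-region depth.
struct ThreadSketch { int critical; ThreadSketch() : critical(0) {} };

static std::vector<ThreadSketch*> all_threads;
static int debug_jni_lock_count = 0; // bumped on every enter/exit
                                     // (debug builds only in the real code)

// At a "safepoint", recompute the count by walking the threads and check
// that it agrees with the always-maintained debug counter.
static void verify_critical_count_sketch() {
  int count = 0;
  for (size_t i = 0; i < all_threads.size(); i++) {
    if (all_threads[i]->critical > 0) count++;
  }
  assert(debug_jni_lock_count == count && "critical counts don't match");
}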
bool GC_locker::check_active_before_gc() {
assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
if (is_active() && !_needs_gc) {
verify_critical_count();
_needs_gc = true;
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
_wait_begin = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
gclog_or_tty->print_cr(INT64_FORMAT ": Setting _needs_gc. Thread \"%s\" %d locked.",
_wait_begin, Thread::current()->name(), _jni_lock_count);
}
}
return is_active();
}
void GC_locker::stall_until_clear() {
assert(!JavaThread::current()->in_critical(), "Would deadlock");
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr(
"Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
JavaThread::current()->name());
}
MutexLocker ml(JNICritical_lock);
if (needs_gc()) {
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr(INT64_FORMAT ": Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
}
}
// Wait for _needs_gc to be cleared
while (GC_locker::needs_gc()) {
while (needs_gc()) {
JNICritical_lock->wait();
}
}
void GC_locker::jni_lock_slow() {
void GC_locker::jni_lock(JavaThread* thread) {
assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MutexLocker mu(JNICritical_lock);
// Block entering threads if we know at least one thread is in a
// JNI critical region and we need a GC.
// We check that at least one thread is in a critical region before
// blocking because blocked threads are woken up by a thread exiting
// a JNI critical region.
while ((is_jni_active() && needs_gc()) || _doing_gc) {
while ((needs_gc() && is_jni_active()) || _doing_gc) {
JNICritical_lock->wait();
}
jni_lock();
thread->enter_critical();
_jni_lock_count++;
increment_debug_jni_lock_count();
}
void GC_locker::jni_unlock_slow() {
void GC_locker::jni_unlock(JavaThread* thread) {
assert(thread->in_last_critical(), "should be exiting critical region");
MutexLocker mu(JNICritical_lock);
jni_unlock();
_jni_lock_count--;
decrement_debug_jni_lock_count();
thread->exit_critical();
if (needs_gc() && !is_jni_active()) {
// We're the last thread out. Cause a GC to occur.
// GC will also check is_active, so this check is not
@ -74,11 +129,17 @@ void GC_locker::jni_unlock_slow() {
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
if (PrintJNIGCStalls && PrintGCDetails) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
gclog_or_tty->print_cr(INT64_FORMAT ": Thread \"%s\" is performing GC after exiting critical section, %d locked",
(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
}
Universe::heap()->collect(GCCause::_gc_locker);
}
_doing_gc = false;
}
clear_needs_gc();
_needs_gc = false;
JNICritical_lock->notify_all();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,53 +51,70 @@
class GC_locker: public AllStatic {
private:
static volatile jint _jni_lock_count; // number of jni active instances
// The _jni_lock_count keeps track of the number of threads that are
// currently in a critical region. It's only kept up to date when
// _needs_gc is true. The current value is computed during
// safepointing and decremented during the slow path of GC_locker
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.
static volatile jint _lock_count; // number of other active instances
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC
static jlong _wait_begin; // Timestamp for the setting of _needs_gc.
// Used only by printing code.
#ifdef ASSERT
// This lock count is updated for all operations and is used to
// validate the jni_lock_count that is computed during safepoints.
static volatile jint _debug_jni_lock_count;
#endif
// Accessors
static bool is_jni_active() {
assert(_needs_gc, "only valid when _needs_gc is set");
return _jni_lock_count > 0;
}
static void set_needs_gc() {
assert(SafepointSynchronize::is_at_safepoint(),
"needs_gc is only set at a safepoint");
_needs_gc = true;
}
// At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started.
static void verify_critical_count() NOT_DEBUG_RETURN;
static void clear_needs_gc() {
assert_lock_strong(JNICritical_lock);
_needs_gc = false;
}
static void jni_lock() {
Atomic::inc(&_jni_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count++; })
assert(Universe::heap() == NULL || !Universe::heap()->is_gc_active(),
"locking failed");
}
static void jni_unlock() {
Atomic::dec(&_jni_lock_count);
CHECK_UNHANDLED_OOPS_ONLY(
if (CheckUnhandledOops) { Thread::current()->_gc_locked_out_count--; })
}
static void jni_lock_slow();
static void jni_unlock_slow();
static void jni_lock(JavaThread* thread);
static void jni_unlock(JavaThread* thread);
public:
// Accessors
static bool is_active();
static bool needs_gc() { return _needs_gc; }
// Shorthand
static bool is_active_and_needs_gc() { return is_active() && needs_gc();}
// Calls set_needs_gc() if is_active() is true. Returns is_active().
// Shorthand
static bool is_active_and_needs_gc() { return needs_gc() && is_active(); }
// In debug mode track the locking state at all times
static void increment_debug_jni_lock_count() {
#ifdef ASSERT
assert(_debug_jni_lock_count >= 0, "bad value");
Atomic::inc(&_debug_jni_lock_count);
#endif
}
static void decrement_debug_jni_lock_count() {
#ifdef ASSERT
assert(_debug_jni_lock_count > 0, "bad value");
Atomic::dec(&_debug_jni_lock_count);
#endif
}
// Set the current lock count
static void set_jni_lock_count(int count) {
_jni_lock_count = count;
verify_critical_count();
}
// Sets _needs_gc if is_active() is true. Returns is_active().
static bool check_active_before_gc();
// Stalls the caller (who should not be in a jni critical section)
@ -131,22 +148,24 @@ class GC_locker: public AllStatic {
// JNI critical regions are the only participants in this scheme
// because they are, by spec, well bounded while in a critical region.
//
// Each of the following two methods is split into a fast path and a slow
// path. JNICritical_lock is only grabbed in the slow path.
// Each of the following two methods is split into a fast path and a
// slow path. JNICritical_lock is only grabbed in the slow path.
// _needs_gc is initially false and every java thread will go
// through the fast path (which does the same thing as the slow path
// when _needs_gc is false). When GC happens at a safepoint,
// GC_locker::is_active() is checked. Since there is no safepoint in the
// fast path of lock_critical() and unlock_critical(), there is no race
// condition between the fast path and GC. After _needs_gc is set at a
// safepoint, every thread will go through the slow path after the safepoint.
// Since after a safepoint, each of the following two methods is either
// entered from the method entry and falls into the slow path, or is
// resumed from the safepoints in the method, which only exist in the slow
// path. So when _needs_gc is set, the slow path is always taken, till
// _needs_gc is cleared.
// through the fast path, which simply increments or decrements the
// current thread's critical count. When GC happens at a safepoint,
// GC_locker::is_active() is checked. Since there is no safepoint in
// the fast path of lock_critical() and unlock_critical(), there is
// no race condition between the fast path and GC. After _needs_gc
// is set at a safepoint, every thread will go through the slow path
// after the safepoint. Since after a safepoint, each of the
// following two methods is either entered from the method entry and
// falls into the slow path, or is resumed from the safepoints in
// the method, which only exist in the slow path. So when _needs_gc
// is set, the slow path is always taken, till _needs_gc is cleared.
static void lock_critical(JavaThread* thread);
static void unlock_critical(JavaThread* thread);
static address needs_gc_address() { return (address) &_needs_gc; }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,16 +28,11 @@
#include "memory/gcLocker.hpp"
inline bool GC_locker::is_active() {
assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
verify_critical_count();
return _lock_count > 0 || _jni_lock_count > 0;
}
inline bool GC_locker::check_active_before_gc() {
if (is_active()) {
set_needs_gc();
}
return is_active();
}
inline void GC_locker::lock() {
// cast away volatile
Atomic::inc(&_lock_count);
@ -56,24 +51,28 @@ inline void GC_locker::unlock() {
inline void GC_locker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
if (!needs_gc()) {
jni_lock();
} else {
jni_lock_slow();
if (needs_gc()) {
// jni_lock calls enter_critical under the lock so that the
// global lock count and the per-thread count are in agreement.
jni_lock(thread);
return;
}
increment_debug_jni_lock_count();
}
thread->enter_critical();
}
inline void GC_locker::unlock_critical(JavaThread* thread) {
thread->exit_critical();
if (!thread->in_critical()) {
if (!needs_gc()) {
jni_unlock();
} else {
jni_unlock_slow();
if (thread->in_last_critical()) {
if (needs_gc()) {
// jni_unlock calls exit_critical under the lock so that
// the global lock count and the per-thread count are in agreement.
jni_unlock(thread);
return;
}
decrement_debug_jni_lock_count();
}
thread->exit_critical();
}
#endif // SHARE_VM_MEMORY_GCLOCKER_INLINE_HPP
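
Putting the inline fast path and the locked slow path together, a single-threaded sketch of the protocol (example-local names; JNICritical_lock and the safepoint machinery elided): the per-thread depth is always maintained, while the global count only exists, under the lock, once a GC has been requested.

struct ThreadSketch { int critical; ThreadSketch() : critical(0) {} };

static bool needs_gc_flag  = false; // set at a safepoint when the heap is full
static int  jni_lock_count = 0;     // only meaningful while needs_gc_flag is set

static void lock_critical_sketch(ThreadSketch* t) {
  if (t->critical == 0 && needs_gc_flag) {
    // slow path: the real code waits on JNICritical_lock until the pending
    // GC completes, then bumps the global count along with the thread count
    jni_lock_count++;
  }
  t->critical++;  // fast path: only the per-thread counter is touched
}

static void unlock_critical_sketch(ThreadSketch* t) {
  if (t->critical == 1 && needs_gc_flag) {
    jni_lock_count--;
    if (jni_lock_count == 0) {
      // last thread out: the real slow path runs the pending GC here and
      // then notifies the stalled threads
    }
  }
  t->critical--;
}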

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -479,11 +479,9 @@ void GenCollectedHeap::do_collection(bool full,
const size_t perm_prev_used = perm_gen()->used();
if (PrintHeapAtGC) {
Universe::print_heap_before_gc();
if (Verbose) {
gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
}
print_heap_before_gc();
if (Verbose) {
gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
}
{
@ -685,9 +683,7 @@ void GenCollectedHeap::do_collection(bool full,
AdaptiveSizePolicy* sp = gen_policy()->size_policy();
AdaptiveSizePolicyOutput(sp, total_collections());
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
print_heap_after_gc();
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -254,7 +254,6 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(int level,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
EventMark m("1 mark object");
TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
trace(" 1");
@ -325,7 +324,6 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* pg = gch->perm_gen();
EventMark m("2 compute new addresses");
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
trace("2");
@ -350,7 +348,6 @@ void GenMarkSweep::mark_sweep_phase3(int level) {
Generation* pg = gch->perm_gen();
// Adjust the pointers to reflect the new locations
EventMark m("3 adjust pointers");
TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
trace("3");
@ -411,7 +408,6 @@ void GenMarkSweep::mark_sweep_phase4() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
Generation* pg = gch->perm_gen();
EventMark m("4 compact heap");
TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
trace("4");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#ifndef PRODUCT
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
bool arrayOopDesc::check_max_length_overflow(BasicType type) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -269,7 +269,7 @@ klassOop constantPoolOopDesc::klass_ref_at_if_loaded_check(constantPoolHandle th
methodOop constantPoolOopDesc::method_at_if_loaded(constantPoolHandle cpool,
int which, Bytecodes::Code invoke_code) {
assert(!constantPoolCacheOopDesc::is_secondary_index(which), "no indy instruction here");
if (cpool->cache() == NULL) return false; // nothing to load yet
if (cpool->cache() == NULL) return NULL; // nothing to load yet
int cache_index = which - CPCACHE_INDEX_TAG;
if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
if (PrintMiscellaneous && (Verbose||WizardMode)) {
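
The one-line fix above is a type-correctness cleanup: method_at_if_loaded returns a methodOop (a pointer), and "return false" only compiled because a constant evaluating to zero converts to a null pointer in pre-C++11 dialects, so the old and new code return the same value but the intent is now explicit. A minimal illustration (example-local names):

#include <cstddef>

struct methodOopSketch;  // opaque, example-local

static methodOopSketch* method_if_loaded(bool cache_present) {
  if (!cache_present) return NULL; // was "return false;" -- same null value,
                                   // but now matches the pointer return type
  return NULL;                     // real lookup elided in this sketch
}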

View File

@ -240,7 +240,6 @@ class instanceKlass: public Klass {
Thread* _init_thread; // Pointer to current thread doing initialization (to handle recursive initialization)
int _vtable_len; // length of Java vtable (in words)
int _itable_len; // length of Java itable (in words)
ReferenceType _reference_type; // reference type
OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily)
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
@ -265,6 +264,8 @@ class instanceKlass: public Klass {
// _idnum_allocated_count.
u1 _init_state; // state of class
u1 _reference_type; // reference type
// embedded Java vtable follows here
// embedded Java itables follows here
// embedded static fields follows here
@ -407,8 +408,11 @@ class instanceKlass: public Klass {
void eager_initialize(Thread *thread);
// reference type
ReferenceType reference_type() const { return _reference_type; }
void set_reference_type(ReferenceType t) { _reference_type = t; }
ReferenceType reference_type() const { return (ReferenceType)_reference_type; }
void set_reference_type(ReferenceType t) {
assert(t == (u1)t, "overflow");
_reference_type = (u1)t;
}
static ByteSize reference_type_offset() { return in_ByteSize(sizeof(klassOopDesc) + offset_of(instanceKlass, _reference_type)); }
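
Shrinking _reference_type from a full ReferenceType field to a u1 saves footprint in every instanceKlass; the assert guards the narrowing. The same pattern in isolation (example-local types):

#include <cassert>

typedef unsigned char u1;
enum ReferenceTypeSketch { REF_NONE, REF_OTHER, REF_SOFT, REF_WEAK, REF_FINAL, REF_PHANTOM };

struct KlassSketch {
  u1 _reference_type;  // one byte instead of an enum-sized field
  ReferenceTypeSketch reference_type() const {
    return (ReferenceTypeSketch)_reference_type;
  }
  void set_reference_type(ReferenceTypeSketch t) {
    assert(t == (u1)t && "overflow");  // fires if the enum outgrows one byte
    _reference_type = (u1)t;
  }
};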
@ -570,9 +574,9 @@ class instanceKlass: public Klass {
void set_method_annotations_of(int idnum, typeArrayOop anno)
{ set_methods_annotations_of(idnum, anno, &_methods_annotations); }
void set_method_parameter_annotations_of(int idnum, typeArrayOop anno)
{ set_methods_annotations_of(idnum, anno, &_methods_annotations); }
{ set_methods_annotations_of(idnum, anno, &_methods_parameter_annotations); }
void set_method_default_annotations_of(int idnum, typeArrayOop anno)
{ set_methods_annotations_of(idnum, anno, &_methods_annotations); }
{ set_methods_annotations_of(idnum, anno, &_methods_default_annotations); }
// allocation
DEFINE_ALLOCATE_PERMANENT(instanceKlass);

View File

@ -158,9 +158,7 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
kl->set_next_sibling(NULL);
kl->set_alloc_count(0);
kl->set_alloc_size(0);
#ifdef TRACE_SET_KLASS_TRACE_ID
TRACE_SET_KLASS_TRACE_ID(kl, 0);
#endif
kl->set_prototype_header(markOopDesc::prototype());
kl->set_biased_lock_revocation_count(0);
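
The #ifdef guards around the TRACE_* macros could be dropped because the tracing header defines them unconditionally: to their real bodies when tracing support is built in, and to nothing otherwise. The idiom in miniature (example-local macro names):

#ifdef EXAMPLE_TRACING_BUILT_IN
  #define EXAMPLE_SET_TRACE_ID(k, id) ((k)->_trace_id = (id))
#else
  #define EXAMPLE_SET_TRACE_ID(k, id)  /* expands to nothing */
#endif

// Call sites need no guard of their own:
//   EXAMPLE_SET_TRACE_ID(kl, 0);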

View File

@ -265,9 +265,7 @@ class Klass : public Klass_vtbl {
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;
#ifdef TRACE_DEFINE_KLASS_TRACE_ID
TRACE_DEFINE_KLASS_TRACE_ID;
#endif
public:
// returns the enclosing klassOop
@ -688,9 +686,7 @@ class Klass : public Klass_vtbl {
jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
void set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }
#ifdef TRACE_DEFINE_KLASS_METHODS
TRACE_DEFINE_KLASS_METHODS;
#endif
// garbage collection support
virtual void follow_weak_klass_links(

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -596,6 +596,11 @@ void methodOopDesc::clear_native_function() {
clear_code();
}
address methodOopDesc::critical_native_function() {
methodHandle mh(this);
return NativeLookup::lookup_critical_entry(mh);
}
void methodOopDesc::set_signature_handler(address handler) {
address* signature_handler = signature_handler_addr();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -403,6 +403,8 @@ class methodOopDesc : public oopDesc {
native_bind_event_is_interesting = true
};
address native_function() const { return *(native_function_addr()); }
address critical_native_function();
// Must specify a real function (not NULL).
// Use clear_native_function() to unregister.
void set_native_function(address function, bool post_event_flag);

Some files were not shown because too many files have changed in this diff.