Gary Collins 2012-10-12 10:49:39 -07:00
commit ad3844358d
57 changed files with 1064 additions and 354 deletions

@ -76,6 +76,8 @@ endif
include $(GAMMADIR)/make/altsrc.make
-include $(HS_ALT_MAKE)/Makefile.make
ifneq ($(ALT_OUTPUTDIR),)
ALT_OUT=ALT_OUTPUTDIR=$(ALT_OUTPUTDIR)
else
@ -88,16 +90,23 @@ C2_VM_TARGETS=product fastdebug optimized jvmg
KERNEL_VM_TARGETS=productkernel fastdebugkernel optimizedkernel jvmgkernel
ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 jvmgminimal1
COMMON_VM_PRODUCT_TARGETS=product product1 productkernel docs export_product
COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 fastdebugkernel docs export_fastdebug
COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 jvmgkernel docs export_debug
COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product
COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug
COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 docs export_debug
# JDK directory list
JDK_DIRS=bin include jre lib demo
all: all_product all_fastdebug
ifeq ($(JVM_VARIANT_MINIMAL1),true)
all_product: productminimal1
all_fastdebug: fastdebugminimal1
all_debug: jvmgminimal1
endif
ifdef BUILD_CLIENT_ONLY
all_product: product1 docs export_product
all_fastdebug: fastdebug1 docs export_fastdebug
@ -114,7 +123,7 @@ all_debug: $(COMMON_VM_DEBUG_TARGETS)
endif
endif
all_optimized: optimized optimized1 optimizedkernel docs export_optimized
all_optimized: optimized optimized1 docs export_optimized
allzero: all_productzero all_fastdebugzero
all_productzero: productzero docs export_product
@ -167,6 +176,11 @@ $(SHARK_VM_TARGETS):
$(MAKE) BUILD_FLAVOR=$(@:%shark=%) VM_TARGET=$@ \
generic_buildshark $(ALT_OUT)
$(MINIMAL1_VM_TARGETS):
$(CD) $(GAMMADIR)/make; \
$(MAKE) BUILD_FLAVOR=$(@:%minimal1=%) VM_TARGET=$@ \
generic_buildminimal1 $(ALT_OUT)
# Build compiler1 (client) rule, different for platforms
generic_build1:
$(MKDIR) -p $(OUTPUTDIR)
@ -239,6 +253,27 @@ generic_buildshark:
$(MAKE) -f $(ABS_OS_MAKEFILE) \
$(MAKE_ARGS) $(VM_TARGET)
generic_buildminimal1:
ifeq ($(JVM_VARIANT_MINIMAL1),true)
$(MKDIR) -p $(OUTPUTDIR)
ifeq ($(ARCH_DATA_MODEL), 32)
ifeq ($(OSNAME),windows)
$(ECHO) "No ($(VM_TARGET)) for $(OSNAME) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)" ;
else
ifeq ($(OSNAME),solaris)
$(ECHO) "No ($(VM_TARGET)) for $(OSNAME) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)" ;
else
$(CD) $(OUTPUTDIR); \
$(MAKE) -f $(ABS_OS_MAKEFILE) $(MAKE_ARGS) $(VM_TARGET) ;
endif
endif
else
@$(ECHO) "No ($(VM_TARGET)) for $(OSNAME) ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
endif
else
@$(ECHO) "Error: trying to build a minimal target but JVM_VARIANT_MINIMAL1 is not true."
endif
# Export file rule
generic_export: $(EXPORT_LIST)
export_product:
@ -287,6 +322,8 @@ C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR)
KERNEL_DIR=$(KERNEL_BASE_DIR)/$(VM_SUBDIR)
ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR)
SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR)
MINIMAL1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
MINIMAL1_DIR=$(MINIMAL1_BASE_DIR)/$(VM_SUBDIR)
ifeq ($(JVM_VARIANT_SERVER), true)
MISC_DIR=$(C2_DIR)
@ -308,6 +345,10 @@ ifeq ($(JVM_VARIANT_ZERO), true)
MISC_DIR=$(ZERO_DIR)
GEN_DIR=$(ZERO_BASE_DIR)/generated
endif
ifeq ($(JVM_VARIANT_MINIMAL1), true)
MISC_DIR=$(MINIMAL1_DIR)
GEN_DIR=$(MINIMAL1_BASE_DIR)/generated
endif
# Bin files (windows)
ifeq ($(OSNAME),windows)
@ -357,6 +398,16 @@ $(EXPORT_KERNEL_DIR)/%.map: $(KERNEL_DIR)/%.map
$(install-file)
endif
# Minimal JVM files always come from minimal area
$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map
$(install-file)
# Shared Library
ifneq ($(OSNAME),windows)
ifeq ($(JVM_VARIANT_SERVER), true)
@ -411,6 +462,26 @@ ifneq ($(OSNAME),windows)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
endif
ifeq ($(JVM_VARIANT_MINIMAL1), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo
$(install-file)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
$(install-file)
$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz
$(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz
$(install-file)
endif
endif
# Jar file (sa-jdi.jar)
@ -451,7 +522,7 @@ $(EXPORT_DOCS_DIR)/platform/jvmti/%: $(DOCS_DIR)/%
$(install-file)
# Xusage file
$(EXPORT_SERVER_DIR)/Xusage.txt $(EXPORT_CLIENT_DIR)/Xusage.txt $(EXPORT_KERNEL_DIR)/Xusage.txt: $(XUSAGE)
$(EXPORT_SERVER_DIR)/Xusage.txt $(EXPORT_CLIENT_DIR)/Xusage.txt $(EXPORT_KERNEL_DIR)/Xusage.txt $(EXPORT_MINIMAL_DIR)/Xusage.txt: $(XUSAGE)
$(prep-target)
$(RM) $@.temp
$(SED) 's/\(separated by \)[;:]/\1$(PATH_SEP)/g' $< > $@.temp
@ -467,6 +538,7 @@ clean_build:
$(RM) -r $(KERNEL_DIR)
$(RM) -r $(ZERO_DIR)
$(RM) -r $(SHARK_DIR)
$(RM) -r $(MINIMAL1_DIR)
clean_export:
$(RM) -r $(EXPORT_PATH)
clean_jdk:
@ -574,10 +646,11 @@ target_help:
@$(ECHO) "create_jdk: Create JDK image, export all files into it"
@$(ECHO) "update_jdk: Update JDK image with fresh exported files"
@$(ECHO) " "
@$(ECHO) "Others targets are:"
@$(ECHO) "Other targets are:"
@$(ECHO) " $(C1_VM_TARGETS)"
@$(ECHO) " $(C2_VM_TARGETS)"
@$(ECHO) " $(KERNEL_VM_TARGETS)"
@$(ECHO) " $(MINIMAL1_VM_TARGETS)"
# Variable help (only common ones used by this workspace)
variable_help: variable_help_intro variable_list variable_help_end
@ -672,9 +745,10 @@ endif
include $(GAMMADIR)/make/jprt.gmk
.PHONY: all world clobber clean help $(C1_VM_TARGETS) $(C2_VM_TARGETS) \
$(KERNEL_VM_TARGETS) \
generic_build1 generic_build2 generic_buildkernel generic_export \
$(KERNEL_VM_TARGETS) $(MINIMAL1_VM_TARGETS) \
generic_build1 generic_build2 generic_buildkernel generic_buildminimal1 generic_export \
export_product export_fastdebug export_debug export_optimized \
export_jdk_product export_jdk_fastdebug export_jdk_debug \
create_jdk copy_jdk update_jdk test_jdk \
copy_product_jdk copy_fastdebug_jdk copy_debug_jdk
copy_product_jdk copy_fastdebug_jdk copy_debug_jdk \
$(HS_ALT_MAKE)/Makefile.make

@ -175,6 +175,10 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
# profiledshark shark <os>_<arch>_shark/profiled
# productshark shark <os>_<arch>_shark/product
#
# fastdebugminimal1 minimal1 <os>_<arch>_minimal1/fastdebug
# jvmgminimal1 minimal1 <os>_<arch>_minimal1/jvmg
# productminimal1 minimal1 <os>_<arch>_minimal1/product
#
# What you get with each target:
#
# debug* - "thin" libjvm_g - debug info linked into the gamma_g launcher
@ -199,6 +203,7 @@ SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
SUBDIRS_ZERO = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
SUBDIRS_SHARK = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
SUBDIRS_MINIMAL1 = $(addprefix $(OSNAME)_$(BUILDARCH)_minimal1/,$(TARGETS))
TARGETS_C2 = $(TARGETS)
TARGETS_C1 = $(addsuffix 1,$(TARGETS))
@ -206,6 +211,7 @@ TARGETS_TIERED = $(addsuffix tiered,$(TARGETS))
TARGETS_CORE = $(addsuffix core,$(TARGETS))
TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
TARGETS_SHARK = $(addsuffix shark,$(TARGETS))
TARGETS_MINIMAL1 = $(addsuffix minimal1,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
@ -223,6 +229,7 @@ all:
@echo " $(TARGETS_CORE)"
@echo " $(TARGETS_ZERO)"
@echo " $(TARGETS_SHARK)"
@echo " $(TARGETS_MINIMAL1)"
checks: check_os_version check_j2se_version
@ -281,6 +288,10 @@ $(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
$(SUBDIRS_MINIMAL1): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=minimal1
platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
@ -340,12 +351,22 @@ ifdef INSTALL
cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_MINIMAL1): $(SUBDIRS_MINIMAL1)
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS)
ifeq ($(TEST_IN_BUILD),true)
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && ./test_gamma
endif
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS) install
endif
# Just build the tree, and nothing else:
tree: $(SUBDIRS_C2)
tree1: $(SUBDIRS_C1)
treecore: $(SUBDIRS_CORE)
treezero: $(SUBDIRS_ZERO)
treeshark: $(SUBDIRS_SHARK)
treeminimal1: $(SUBDIRS_MINIMAL1)
# Doc target. This is the same for all build options.
# Hence create a docs directory beside ...$(ARCH)_[...]
@ -367,17 +388,23 @@ shark: jvmgshark productshark
clean_docs:
rm -rf $(SUBDIR_DOCS)
clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark:
clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1:
rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_minimal1 clean_docs
include $(GAMMADIR)/make/cscope.make
#
# Include alternate Makefile if it exists.
#
-include $(HS_ALT_MAKE)/$(OSNAME)/Makefile.make
#-------------------------------------------------------------------------------
.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK)
.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK) $(TARGETS_MINIMAL1)
.PHONY: tree tree1 treecore treezero treeshark
.PHONY: all compiler1 compiler2 core zero shark
.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark docs clean_docs
.PHONY: checks check_os_version check_j2se_version
.PHONY: $(HS_ALT_MAKE)/$(OSNAME)/Makefile.make

@ -60,6 +60,7 @@ OS_VENDOR:=$(shell uname -s)
-include $(SPEC)
include $(GAMMADIR)/make/scm.make
include $(GAMMADIR)/make/defs.make
include $(GAMMADIR)/make/altsrc.make
@ -202,6 +203,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
echo; \
echo "GAMMADIR = $(GAMMADIR)"; \
echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
echo "OSNAME = $(OSNAME)"; \
echo "SYSDEFS = \$$(Platform_sysdefs)"; \
echo "SRCARCH = $(SRCARCH)"; \
echo "BUILDARCH = $(BUILDARCH)"; \
@ -259,6 +262,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
[ -n "$(SPEC)" ] && \
echo "include $(SPEC)"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@

@ -155,6 +155,7 @@ EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
@ -168,6 +169,19 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
endif
ifeq ($(JVM_VARIANT_MINIMAL1),true)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
else
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
endif
endif
endif
# Serviceability Binaries
# No SA Support for PPC, IA64, ARM or zero
ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \

@ -151,11 +151,6 @@ ifdef CC_INTERP
CFLAGS += -DCC_INTERP
endif
# Build for embedded targets
ifdef JAVASE_EMBEDDED
CFLAGS += -DJAVASE_EMBEDDED
endif
# Keep temporary files (.ii, .s)
ifdef NEED_ASM
CFLAGS += -save-temps
@ -186,20 +181,32 @@ ifeq ($(OS_VENDOR), Darwin)
CFLAGS_WARN/os_bsd.o = $(CFLAGS_WARN/DEFAULT) -Wno-deprecated-declarations
endif
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3
# Hotspot uses very unstrict aliasing turn this optimization off
# This option is added to CFLAGS rather than OPT_CFLAGS
# so that OPT_CFLAGS overrides get this option too.
CFLAGS += -fno-strict-aliasing
# The flags to use for an Optimized g++ build
ifeq ($(OS_VENDOR), Darwin)
# use -Os by default, unless -O3 can be proved to be worth the cost, as per policy
# <http://wikis.sun.com/display/OpenJDK/Mac+OS+X+Port+Compilers>
OPT_CFLAGS += -Os
OPT_CFLAGS_DEFAULT ?= SIZE
else
OPT_CFLAGS += -O3
OPT_CFLAGS_DEFAULT ?= SPEED
endif
# Hotspot uses very unstrict aliasing turn this optimization off
OPT_CFLAGS += -fno-strict-aliasing
ifdef OPT_CFLAGS
ifneq ("$(origin OPT_CFLAGS)", "command line")
$(error " Use OPT_EXTRAS instead of OPT_CFLAGS to add extra flags to OPT_CFLAGS.")
endif
endif
# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp
OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS)
# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp
# if we use expensive-optimizations
ifeq ($(BUILDARCH), ia64)
OPT_CFLAGS += -fno-expensive-optimizations

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,6 @@
#
# IA64 only uses c++ based interpreter
CFLAGS += -DCC_INTERP -D_LP64=1 -DVM_LITTLE_ENDIAN
# Hotspot uses very unstrict aliasing turn this optimization off
OPT_CFLAGS += -fno-strict-aliasing
ifeq ($(VERSION),debug)
ASM_FLAGS= -DDEBUG
else

@ -0,0 +1,46 @@
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
TYPE=MINIMAL1
INCLUDE_JVMTI ?= false
INCLUDE_FPROF ?= false
INCLUDE_VM_STRUCTS ?= false
INCLUDE_JNI_CHECK ?= false
INCLUDE_SERVICES ?= false
INCLUDE_MANAGEMENT ?= false
INCLUDE_ALTERNATE_GCS ?= false
INCLUDE_NMT ?= false
INCLUDE_CDS ?= false
CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
CFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
Src_Dirs/MINIMAL1 = $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Files_EXCLUDE/MINIMAL1 += $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-include $(HS_ALT_MAKE)/$(OSNAME)/makefiles/minimal1.make
.PHONY: $(HS_ALT_MAKE)/$(OSNAME)/makefiles/minimal1.make

@ -190,7 +190,7 @@ SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero
# Always exclude these.
Src_Files_EXCLUDE := jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp

@ -22,6 +22,27 @@
#
#
# The common definitions for hotspot builds.
# Optionally include SPEC file generated by configure.
ifneq ($(SPEC),)
include $(SPEC)
endif
# Directory paths and user name
# Unless GAMMADIR is set on the command line, search upward from
# the current directory for a parent directory containing "src/share/vm".
# If that fails, look for $GAMMADIR in the environment.
# When the tree of subdirs is built, this setting is stored in each flags.make.
GAMMADIR := $(shell until ([ -d dev ]&&echo $${GAMMADIR:-/GAMMADIR/}) || ([ -d src/share/vm ]&&pwd); do cd ..; done)
HS_SRC_DIR=$(GAMMADIR)/src
HS_MAKE_DIR=$(GAMMADIR)/make
HS_BUILD_DIR=$(GAMMADIR)/build
ifeq ($(USER),)
USER=$(USERNAME)
endif
ifeq ($(HS_ALT_MAKE),)
ifneq ($(OPENJDK),true)
HS_ALT_MAKE=$(GAMMADIR)/make/closed
@ -30,12 +51,10 @@ ifeq ($(HS_ALT_MAKE),)
endif
endif
# The common definitions for hotspot builds.
# Optionally include SPEC file generated by configure.
ifneq ($(SPEC),)
include $(SPEC)
endif
#
# Include alternate defs.make if it exists
#
-include $(HS_ALT_MAKE)/defs.make
# Default to verbose build logs (show all compile lines):
MAKE_VERBOSE=y
@ -84,20 +103,6 @@ ifeq ($(JVM_VARIANTS),)
endif
endif
# Directory paths and user name
# Unless GAMMADIR is set on the command line, search upward from
# the current directory for a parent directory containing "src/share/vm".
# If that fails, look for $GAMMADIR in the environment.
# When the tree of subdirs is built, this setting is stored in each flags.make.
GAMMADIR := $(shell until ([ -d dev ]&&echo $${GAMMADIR:-/GAMMADIR/}) || ([ -d src/share/vm ]&&pwd); do cd ..; done)
HS_SRC_DIR=$(GAMMADIR)/src
HS_MAKE_DIR=$(GAMMADIR)/make
HS_BUILD_DIR=$(GAMMADIR)/build
ifeq ($(USER),)
USER=$(USERNAME)
endif
# hotspot version definitions
include $(GAMMADIR)/make/hotspot_version
@ -339,3 +344,4 @@ ifndef JAVASE_EMBEDDED
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
endif
.PHONY: $(HS_ALT_MAKE)/defs.make

@ -0,0 +1,110 @@
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
ifeq ($(INCLUDE_JVMTI), false)
CXXFLAGS += -DINCLUDE_JVMTI=0
CFLAGS += -DINCLUDE_JVMTI=0
Src_Files_EXCLUDE += jvmtiGetLoadedClasses.cpp forte.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
jvmtiImpl.cpp jvmtiManageCapabilities.cpp jvmtiRawMonitor.cpp jvmtiUtil.cpp jvmtiTrace.cpp \
jvmtiCodeBlobEvents.cpp jvmtiEnv.cpp jvmtiRedefineClasses.cpp jvmtiEnvBase.cpp jvmtiEnvThreadState.cpp \
jvmtiTagMap.cpp jvmtiEventController.cpp evmCompat.cpp jvmtiEnter.xsl jvmtiExport.cpp
endif
ifeq ($(INCLUDE_FPROF), false)
CXXFLAGS += -DINCLUDE_FPROF=0
CFLAGS += -DINCLUDE_FPROF=0
Src_Files_EXCLUDE += fprofiler.cpp
endif
ifeq ($(INCLUDE_VM_STRUCTS), false)
CXXFLAGS += -DINCLUDE_VM_STRUCTS=0
CFLAGS += -DINCLUDE_VM_STRUCTS=0
Src_Files_EXCLUDE += vmStructs.cpp
endif
ifeq ($(INCLUDE_JNI_CHECK), false)
CXXFLAGS += -DINCLUDE_JNI_CHECK=0
CFLAGS += -DINCLUDE_JNI_CHECK=0
Src_Files_EXCLUDE += jniCheck.cpp
endif
ifeq ($(INCLUDE_SERVICES), false)
CXXFLAGS += -DINCLUDE_SERVICES=0
CFLAGS += -DINCLUDE_SERVICES=0
Src_Files_EXCLUDE += heapDumper.cpp heapInspection.cpp \
attachListener_linux.cpp attachListener.cpp
endif
ifeq ($(INCLUDE_MANAGEMENT), false)
CXXFLAGS += -DINCLUDE_MANAGEMENT=0
CFLAGS += -DINCLUDE_MANAGEMENT=0
endif
ifeq ($(INCLUDE_CDS), false)
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
Src_Files_EXCLUDE += metaspaceShared.cpp
endif
ifeq ($(INCLUDE_ALTERNATE_GCS), false)
CXXFLAGS += -DINCLUDE_ALTERNATE_GCS=0
CFLAGS += -DINCLUDE_ALTERNATE_GCS=0
CXXFLAGS += -DSERIALGC
CFLAGS += -DSERIALGC
Src_Files_EXCLUDE += \
binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \
heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \
satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \
adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \
gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \
pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \
psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \
psPermGen.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \
psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \
parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \
spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \
immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldWorkingGroup.cpp g1Log.cpp
endif
ifeq ($(INCLUDE_NMT), false)
CXXFLAGS += -DINCLUDE_NMT=0
CFLAGS += -DINCLUDE_NMT=0
Src_Files_EXCLUDE += \
memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
memTracker.cpp nmtDCmd.cpp
endif
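
The INCLUDE_* knobs above work in two steps: the makefile drops the feature's source files from the build, and the matching -DINCLUDE_<FEATURE>=0 define tells the code that remains to compile stubs instead. The C++ hunks later in this commit lean on helper macros (JVMTI_ONLY, NOT_JVMTI_RETURN, NOT_CDS_RETURN_, NOT_SERVICES_RETURN and friends) keyed off these defines. They live in utilities/macros.hpp, which is not among the hunks shown here, so the sketch below is an assumed reconstruction of that pattern for the JVMTI case rather than the actual file contents:

// Assumed shape of the INCLUDE_JVMTI support macros (reconstruction, not a hunk from this commit).
#ifndef INCLUDE_JVMTI
#define INCLUDE_JVMTI 1                      // full builds leave the feature on
#endif

#if INCLUDE_JVMTI
#define JVMTI_ONLY(x)           x
#define NOT_JVMTI(x)
#define NOT_JVMTI_RETURN                     /* declaration only; the real body lives in a .cpp */
#define NOT_JVMTI_RETURN_(code)              /* declaration only; the real body lives in a .cpp */
#else
#define JVMTI_ONLY(x)
#define NOT_JVMTI(x)            x
#define NOT_JVMTI_RETURN        {}           // the declaration becomes an empty inline stub
#define NOT_JVMTI_RETURN_(code) { return code; }
#endif

// Usage matching the declarations rewritten later in this commit:
class JvmtiExportSketch {
 public:
  static void post_data_dump() NOT_JVMTI_RETURN;                // no-op when JVMTI is excluded
  static bool hide_single_stepping() NOT_JVMTI_RETURN_(false);  // constant false when excluded
};

The CDS, SERVICES, NMT and JNI_CHECK families used by the other hunks are assumed to follow the same template.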

@ -175,6 +175,10 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
# profiledshark shark <os>_<arch>_shark/profiled
# productshark shark <os>_<arch>_shark/product
#
# fastdebugminimal1 minimal1 <os>_<arch>_minimal1/fastdebug
# jvmgminimal1 minimal1 <os>_<arch>_minimal1/jvmg
# productminimal1 minimal1 <os>_<arch>_minimal1/product
#
# What you get with each target:
#
# debug* - "thin" libjvm_g - debug info linked into the gamma_g launcher
@ -199,6 +203,7 @@ SUBDIRS_TIERED = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
SUBDIRS_CORE = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
SUBDIRS_ZERO = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
SUBDIRS_SHARK = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
SUBDIRS_MINIMAL1 = $(addprefix $(OSNAME)_$(BUILDARCH)_minimal1/,$(TARGETS))
TARGETS_C2 = $(TARGETS)
TARGETS_C1 = $(addsuffix 1,$(TARGETS))
@ -206,6 +211,7 @@ TARGETS_TIERED = $(addsuffix tiered,$(TARGETS))
TARGETS_CORE = $(addsuffix core,$(TARGETS))
TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
TARGETS_SHARK = $(addsuffix shark,$(TARGETS))
TARGETS_MINIMAL1 = $(addsuffix minimal1,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
@ -224,6 +230,7 @@ all:
@echo " $(TARGETS_CORE)"
@echo " $(TARGETS_ZERO)"
@echo " $(TARGETS_SHARK)"
@echo " $(TARGETS_MINIMAL1)"
checks: check_os_version check_j2se_version
@ -281,6 +288,11 @@ $(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
$(SUBDIRS_MINIMAL1): $(BUILDTREE_MAKE)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=minimal1
platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
@ -340,12 +352,22 @@ ifdef INSTALL
cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
endif
$(TARGETS_MINIMAL1): $(SUBDIRS_MINIMAL1)
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS)
ifeq ($(TEST_IN_BUILD),true)
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && ./test_gamma
endif
ifdef INSTALL
cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS) install
endif
# Just build the tree, and nothing else:
tree: $(SUBDIRS_C2)
tree1: $(SUBDIRS_C1)
treecore: $(SUBDIRS_CORE)
treezero: $(SUBDIRS_ZERO)
treeshark: $(SUBDIRS_SHARK)
treeminimal1: $(SUBDIRS_MINIMAL1)
# Doc target. This is the same for all build options.
# Hence create a docs directory beside ...$(ARCH)_[...]
@ -369,17 +391,23 @@ shark: jvmgshark productshark
clean_docs:
rm -rf $(SUBDIR_DOCS)
clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark:
clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1:
rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
clean: clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_minimal1 clean_docs
include $(GAMMADIR)/make/cscope.make
#
# Include alternate Makefile if it exists.
#
-include $(HS_ALT_MAKE)/$(OSNAME)/Makefile.make
#-------------------------------------------------------------------------------
.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK)
.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK) $(TARGETS_MINIMAL1)
.PHONY: tree tree1 treecore treezero treeshark
.PHONY: all compiler1 compiler2 core zero shark
.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark docs clean_docs
.PHONY: checks check_os_version check_j2se_version
.PHONY: $(HS_ALT_MAKE)/$(OSNAME)/Makefile.make

@ -57,6 +57,7 @@
-include $(SPEC)
include $(GAMMADIR)/make/scm.make
include $(GAMMADIR)/make/defs.make
include $(GAMMADIR)/make/altsrc.make
@ -195,6 +196,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
echo; \
echo "GAMMADIR = $(GAMMADIR)"; \
echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
echo "OSNAME = $(OSNAME)"; \
echo "SYSDEFS = \$$(Platform_sysdefs)"; \
echo "SRCARCH = $(SRCARCH)"; \
echo "BUILDARCH = $(BUILDARCH)"; \
@ -262,6 +265,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
[ -n "$(SPEC)" ] && \
echo "include $(SPEC)"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
) > $@

@ -254,6 +254,7 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
endif
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
EXPORT_LIST += $(EXPORT_JRE_LIB_DIR)/wb.jar
@ -281,6 +282,19 @@ ifeq ($(JVM_VARIANT_CLIENT),true)
endif
endif
ifeq ($(JVM_VARIANT_MINIMAL1),true)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
ifeq ($(ZIP_DEBUGINFO_FILES),1)
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
else
EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
endif
endif
endif
# Serviceability Binaries
# No SA Support for PPC, IA64, ARM or zero
ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \

@ -116,11 +116,6 @@ ifdef CC_INTERP
CFLAGS += -DCC_INTERP
endif
# Build for embedded targets
ifdef JAVASE_EMBEDDED
CFLAGS += -DJAVASE_EMBEDDED
endif
# Keep temporary files (.ii, .s)
ifdef NEED_ASM
CFLAGS += -save-temps
@ -146,10 +141,23 @@ CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))
# The flags to use for an Optimized g++ build
OPT_CFLAGS += -O3
OPT_CFLAGS/SIZE=-Os
OPT_CFLAGS/SPEED=-O3
# Hotspot uses very unstrict aliasing turn this optimization off
OPT_CFLAGS += -fno-strict-aliasing
# This option is added to CFLAGS rather than OPT_CFLAGS
# so that OPT_CFLAGS overrides get this option too.
CFLAGS += -fno-strict-aliasing
OPT_CFLAGS_DEFAULT ?= SPEED
ifdef OPT_CFLAGS
ifneq ("$(origin OPT_CFLAGS)", "command line")
$(error " Use OPT_EXTRAS instead of OPT_CFLAGS to add extra flags to OPT_CFLAGS.")
endif
endif
OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS)
# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp
# if we use expensive-optimizations

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,6 @@
#
# IA64 only uses c++ based interpreter
CFLAGS += -DCC_INTERP -D_LP64=1 -DVM_LITTLE_ENDIAN
# Hotspot uses very unstrict aliasing turn this optimization off
OPT_CFLAGS += -fno-strict-aliasing
ifeq ($(VERSION),debug)
ASM_FLAGS= -DDEBUG
else

@ -0,0 +1,46 @@
#
# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
TYPE=MINIMAL1
INCLUDE_JVMTI ?= false
INCLUDE_FPROF ?= false
INCLUDE_VM_STRUCTS ?= false
INCLUDE_JNI_CHECK ?= false
INCLUDE_SERVICES ?= false
INCLUDE_MANAGEMENT ?= false
INCLUDE_ALTERNATE_GCS ?= false
INCLUDE_NMT ?= false
INCLUDE_CDS ?= false
CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
CFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
Src_Dirs/MINIMAL1 = $(CORE_PATHS) $(COMPILER1_PATHS)
Src_Files_EXCLUDE/MINIMAL1 += $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
-include $(HS_ALT_MAKE)/$(OSNAME)/makefiles/minimal1.make
.PHONY: $(HS_ALT_MAKE)/$(OSNAME)/makefiles/minimal1.make

@ -192,7 +192,7 @@ SHARK_SPECIFIC_FILES := shark
ZERO_SPECIFIC_FILES := zero
# Always exclude these.
Src_Files_EXCLUDE := jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
# Exclude per type.
Src_Files_EXCLUDE/CORE := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp

@ -154,10 +154,9 @@ MAKE_ARGS += ZIPEXE=$(ZIPEXE)
# On 32 bit windows we build server, client and kernel, on 64 bit just server.
ifeq ($(JVM_VARIANTS),)
ifeq ($(ARCH_DATA_MODEL), 32)
JVM_VARIANTS:=client,server,kernel
JVM_VARIANTS:=client,server
JVM_VARIANT_CLIENT:=true
JVM_VARIANT_SERVER:=true
JVM_VARIANT_KERNEL:=true
else
JVM_VARIANTS:=server
JVM_VARIANT_SERVER:=true

@ -5870,15 +5870,6 @@ extern "C" {
}
}
// Just to get the Kernel build to link on solaris for testing.
extern "C" {
class ASGCT_CallTrace;
void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
KERNEL_RETURN;
}
// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the

@ -27,6 +27,7 @@
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
@ -157,8 +158,16 @@ enum MemoryType {
typedef unsigned short MEMFLAGS;
#if INCLUDE_NMT
extern bool NMT_track_callsite;
#else
const bool NMT_track_callsite = false;
#endif // INCLUDE_NMT
// debug build does not inline
#if defined(_DEBUG_)
#define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
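
One detail worth noting in the hunk above: when NMT is compiled out, the extern flag is replaced by a const false rather than removed, so macros such as CURRENT_PC keep compiling and the tracking branch folds away at compile time. A reduced stand-alone illustration (the address typedef and os::get_caller_pc are stand-ins here, not HotSpot code):

#define INCLUDE_NMT 0                        // what -DINCLUDE_NMT=0 from excludeSrc.make provides
typedef unsigned char* address;              // stand-in for HotSpot's address type

#if INCLUDE_NMT
extern bool NMT_track_callsite;
#else
const bool NMT_track_callsite = false;       // constant, so the compiler can drop the branch
#endif

address get_caller_pc(int) { return 0; }     // stand-in for os::get_caller_pc
#define CURRENT_PC (NMT_track_callsite ? get_caller_pc(1) : 0)

address current_pc_demo() { return CURRENT_PC; }  // reduces to "return 0" in this configuration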

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#ifndef SERVICES_KERNEL
#if INCLUDE_SERVICES
// HeapInspection
@ -129,12 +129,12 @@ class KlassInfoHisto : public StackObj {
void sort();
};
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
class HeapInspection : public AllStatic {
public:
static void heap_inspection(outputStream* st, bool need_prologue) KERNEL_RETURN;
static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) KERNEL_RETURN;
static void heap_inspection(outputStream* st, bool need_prologue) NOT_SERVICES_RETURN;
static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
};
#endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP

@ -42,14 +42,8 @@
int MetaspaceShared::_max_alignment = 0;
int MetaspaceShared::max_alignment() { return _max_alignment; }
void MetaspaceShared::set_max_alignment(int alignment) { _max_alignment = alignment; }
// Accessor functions to save shared space created for metadata, which has
// extra space allocated at the end for miscellaneous data and code.
ReservedSpace* MetaspaceShared::_shared_rs = NULL;
ReservedSpace* MetaspaceShared::shared_rs() { return _shared_rs; }
void MetaspaceShared::set_shared_rs(ReservedSpace* rs) { _shared_rs = rs; }
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

@ -56,18 +56,33 @@ class MetaspaceShared : AllStatic {
n_regions = 4
};
static void set_max_alignment(int alignment) KERNEL_RETURN;
static int max_alignment() KERNEL_RETURN_(0);
// Accessor functions to save shared space created for metadata, which has
// extra space allocated at the end for miscellaneous data and code.
static void set_max_alignment(int alignment) {
CDS_ONLY(_max_alignment = alignment);
}
static void preload_and_dump(TRAPS) KERNEL_RETURN;
static ReservedSpace* shared_rs();
static void set_shared_rs(ReservedSpace* rs) KERNEL_RETURN;
static int max_alignment() {
CDS_ONLY(return _max_alignment);
NOT_CDS(return 0);
}
static bool map_shared_spaces(FileMapInfo* mapinfo) KERNEL_RETURN_(false);
static void initialize_shared_spaces() KERNEL_RETURN;
static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
static ReservedSpace* shared_rs() {
CDS_ONLY(return _shared_rs);
NOT_CDS(return NULL);
}
static void set_shared_rs(ReservedSpace* rs) {
CDS_ONLY(_shared_rs = rs;)
}
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
static void initialize_shared_spaces() NOT_CDS_RETURN;
// Return true if given address is in the mapped shared space.
static bool is_in_shared_space(const void* p) KERNEL_RETURN_(false);
static bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
static void generate_vtable_methods(void** vtbl_list,
void** vtable,
@ -79,7 +94,7 @@ class MetaspaceShared : AllStatic {
// Remap the shared readonly space to shared readwrite, private if
// sharing is enabled. Simply returns true if sharing is not enabled
// or if the remapping has already been done by a prior call.
static bool remap_shared_readonly_as_readwrite() KERNEL_RETURN_(true);
static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
static void print_shared_spaces();
};
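
Assuming CDS_ONLY/NOT_CDS/NOT_CDS_RETURN_ follow the same template as the JVMTI macros sketched after excludeSrc.make, a -DINCLUDE_CDS=0 build reduces the rewritten accessors above to constant stubs, which is why the out-of-line definitions could be deleted from metaspaceShared.cpp in the previous hunk. Roughly (illustrative names, with ReservedSpace and FileMapInfo reduced to forward declarations):

class ReservedSpace;
class FileMapInfo;

// What the inline members boil down to when INCLUDE_CDS is 0:
int            max_alignment_stub()                 { return 0; }      // NOT_CDS(return 0)
ReservedSpace* shared_rs_stub()                     { return 0; }      // NOT_CDS(return NULL)
void           set_shared_rs_stub(ReservedSpace*)   { }                // CDS_ONLY(...) compiled away
bool           map_shared_spaces_stub(FileMapInfo*) { return false; }  // NOT_CDS_RETURN_(false)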

@ -752,7 +752,7 @@ jint Universe::initialize_heap() {
#ifndef SERIALGC
Universe::_collectedHeap = new ParallelScavengeHeap();
#else // SERIALGC
fatal("UseParallelGC not supported in java kernel vm.");
fatal("UseParallelGC not supported in this VM.");
#endif // SERIALGC
} else if (UseG1GC) {
@ -777,7 +777,7 @@ jint Universe::initialize_heap() {
gc_policy = new ConcurrentMarkSweepPolicy();
}
#else // SERIALGC
fatal("UseConcMarkSweepGC not supported in java kernel vm.");
fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // SERIALGC
} else { // default old generation
gc_policy = new MarkSweepPolicy();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
class Forte : AllStatic {
public:
static void register_stub(const char* name, address start, address end)
KERNEL_RETURN;
NOT_JVMTI_RETURN;
// register internal VM stub
};

@ -3003,9 +3003,9 @@ JNI_ENTRY(jobject, jni_GetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID
HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(
env, clazz, (uintptr_t) fieldID);
#endif /* USDT2 */
#ifndef JNICHECK_KERNEL
#if INCLUDE_JNI_CHECK
DEBUG_ONLY(Klass* param_k = jniCheck::validate_class(thread, clazz);)
#endif // JNICHECK_KERNEL
#endif // INCLUDE_JNI_CHECK
JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
assert(id->is_static_field_id(), "invalid static field id");
// Keep JVMTI addition small and only check enabled flag here.
@ -3950,6 +3950,7 @@ DEFINE_SETSCALARARRAYREGION(T_DOUBLE, jdouble, Double, double
// SetNativeMethodPrefix(es) functions in the JVM TI Spec for details.
static Method* find_prefixed_native(KlassHandle k,
Symbol* name, Symbol* signature, TRAPS) {
#if INCLUDE_JVMTI
ResourceMark rm(THREAD);
Method* method;
int name_len = name->utf8_length();
@ -3981,6 +3982,7 @@ static Method* find_prefixed_native(KlassHandle k,
name_len = trial_len;
name_str = trial_name_str;
}
#endif // INCLUDE_JVMTI
return NULL; // not found
}
@ -4974,11 +4976,9 @@ void quicken_jni_functions() {
// Returns the function structure
struct JNINativeInterface_* jni_functions() {
#ifndef JNICHECK_KERNEL
#if INCLUDE_JNI_CHECK
if (CheckJNICalls) return jni_functions_check();
#else // JNICHECK_KERNEL
if (CheckJNICalls) warning("-Xcheck:jni is not supported in kernel vm.");
#endif // JNICHECK_KERNEL
#endif // INCLUDE_JNI_CHECK
return &jni_NativeInterface;
}
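
The net effect of the guard change above: in a build without the checked JNI layer, -Xcheck:jni is now silently ignored instead of triggering the old "not supported in kernel vm" warning. A reduced sketch with stand-in types (the real table and flag come from jni.cpp and globals.hpp):

#define INCLUDE_JNI_CHECK 0                         // minimal1 build

struct JNINativeInterface_ {};                      // stand-in for the real JNI function table
static JNINativeInterface_ jni_NativeInterface;
static bool CheckJNICalls = false;                  // stand-in for the -Xcheck:jni flag

#if INCLUDE_JNI_CHECK
struct JNINativeInterface_* jni_functions_check();  // supplied by jniCheck.cpp in real builds
#endif

struct JNINativeInterface_* jni_functions() {
#if INCLUDE_JNI_CHECK
  if (CheckJNICalls) return jni_functions_check();  // checked table when the feature is present
#endif
  return &jni_NativeInterface;                      // otherwise the flag is simply ignored
}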

@ -1,6 +1,6 @@
<?xml version="1.0"?>
<!--
Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,8 @@
<xsl:call-template name="sourceHeader"/>
<xsl:text>
# include "precompiled.hpp"
# include "utilities/macros.hpp"
#if INCLUDE_JVMTI
# include "prims/jvmtiEnter.hpp"
# include "prims/jvmtiRawMonitor.hpp"
# include "prims/jvmtiUtil.hpp"
@ -247,6 +249,7 @@ struct jvmtiInterface_1_ jvmti</xsl:text>
<xsl:text>
};
#endif // INCLUDE_JVMTI
</xsl:text>
</xsl:template>
@ -469,7 +472,7 @@ static jvmtiError JNICALL
</xsl:text>
<xsl:if test="not(contains(@jkernel,'yes'))">
<xsl:text>&#xA;#ifdef JVMTI_KERNEL &#xA;</xsl:text>
<xsl:text>&#xA;#if !INCLUDE_JVMTI &#xA;</xsl:text>
<xsl:text> return JVMTI_ERROR_NOT_AVAILABLE; &#xA;</xsl:text>
<xsl:text>#else &#xA;</xsl:text>
</xsl:if>
@ -596,7 +599,7 @@ static jvmtiError JNICALL
</xsl:text>
<xsl:if test="not(contains(@jkernel,'yes'))">
<xsl:text>#endif // JVMTI_KERNEL&#xA;</xsl:text>
<xsl:text>#endif // INCLUDE_JVMTI&#xA;</xsl:text>
</xsl:if>
<xsl:text>}&#xA;</xsl:text>
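
For context, the jkernel guard in this stylesheet wraps each generated JVM TI entry point; after the change it is keyed off INCLUDE_JVMTI instead of JVMTI_KERNEL. A rough sketch of the shape of one generated function (types and the function name are stand-ins; the real code is emitted by jvmtiEnter.xsl):

typedef int jvmtiError;                             // stand-in for the jvmti.h error type
const jvmtiError JVMTI_ERROR_NOT_AVAILABLE = 98;    // stand-in; the real constant comes from jvmti.h
#define INCLUDE_JVMTI 0                             // minimal1 build

jvmtiError jvmti_GetSomething(void* env, int* result_ptr) {
#if !INCLUDE_JVMTI
  (void)env; (void)result_ptr;
  return JVMTI_ERROR_NOT_AVAILABLE;                 // every entry point reports "not available"
#else
  // ... body emitted by the stylesheet for full builds ...
  return 0;
#endif
}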

@ -56,7 +56,9 @@ class JvmtiEnvBase : public CHeapObj<mtInternal> {
private:
#if INCLUDE_JVMTI
static JvmtiEnvBase* _head_environment; // head of environment list
#endif // INCLUDE_JVMTI
static bool _globally_initialized;
static jvmtiPhase _phase;
@ -129,7 +131,10 @@ class JvmtiEnvBase : public CHeapObj<mtInternal> {
friend class JvmtiEnvIterator;
JvmtiEnv* next_environment() { return (JvmtiEnv*)_next; }
void set_next_environment(JvmtiEnvBase* env) { _next = env; }
static JvmtiEnv* head_environment() { return (JvmtiEnv*)_head_environment; }
static JvmtiEnv* head_environment() {
JVMTI_ONLY(return (JvmtiEnv*)_head_environment);
NOT_JVMTI(return NULL);
}
public:

@ -46,21 +46,18 @@ class JvmtiEnv;
class JvmtiThreadState;
class AttachOperation;
#ifndef JVMTI_KERNEL
#define JVMTI_SUPPORT_FLAG(key) \
private: \
static bool _##key; \
public: \
inline static void set_##key(bool on) { _##key = (on != 0); } \
inline static bool key() { return _##key; }
#else // JVMTI_KERNEL
#define JVMTI_SUPPORT_FLAG(key) \
private: \
const static bool _##key = false; \
static bool _##key; \
public: \
inline static void set_##key(bool on) { report_unsupported(on); } \
inline static bool key() { return _##key; }
#endif // JVMTI_KERNEL
inline static void set_##key(bool on) { \
JVMTI_ONLY(_##key = (on != 0)); \
NOT_JVMTI(report_unsupported(on)); \
} \
inline static bool key() { \
JVMTI_ONLY(return _##key); \
NOT_JVMTI(return false); \
}
// This class contains the JVMTI interface for the rest of hotspot.
@ -68,6 +65,8 @@ class AttachOperation;
class JvmtiExport : public AllStatic {
friend class VMStructs;
private:
#if INCLUDE_JVMTI
static int _field_access_count;
static int _field_modification_count;
@ -75,6 +74,7 @@ class JvmtiExport : public AllStatic {
static bool _can_hotswap_or_post_breakpoint;
static bool _can_modify_any_class;
static bool _can_walk_any_space;
#endif // INCLUDE_JVMTI
JVMTI_SUPPORT_FLAG(can_get_source_debug_extension)
JVMTI_SUPPORT_FLAG(can_maintain_original_method_order)
@ -125,10 +125,18 @@ class JvmtiExport : public AllStatic {
// these should only be called by the friend class
friend class JvmtiManageCapabilities;
inline static void set_can_modify_any_class(bool on) { _can_modify_any_class = (on != 0); }
inline static void set_can_access_local_variables(bool on) { _can_access_local_variables = (on != 0); }
inline static void set_can_hotswap_or_post_breakpoint(bool on) { _can_hotswap_or_post_breakpoint = (on != 0); }
inline static void set_can_walk_any_space(bool on) { _can_walk_any_space = (on != 0); }
inline static void set_can_modify_any_class(bool on) {
JVMTI_ONLY(_can_modify_any_class = (on != 0);)
}
inline static void set_can_access_local_variables(bool on) {
JVMTI_ONLY(_can_access_local_variables = (on != 0);)
}
inline static void set_can_hotswap_or_post_breakpoint(bool on) {
JVMTI_ONLY(_can_hotswap_or_post_breakpoint = (on != 0);)
}
inline static void set_can_walk_any_space(bool on) {
JVMTI_ONLY(_can_walk_any_space = (on != 0);)
}
enum {
JVMTI_VERSION_MASK = 0x70000000,
@ -144,7 +152,7 @@ class JvmtiExport : public AllStatic {
// posts a DynamicCodeGenerated event (internal/private implementation).
// The public post_dynamic_code_generated* functions make use of the
// internal implementation. Also called from JvmtiDeferredEvent::post()
static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) NOT_JVMTI_RETURN;
private:
@ -154,9 +162,9 @@ class JvmtiExport : public AllStatic {
static void post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length,
const void *code_begin, const jint map_length,
const jvmtiAddrLocationMap* map) KERNEL_RETURN;
const jvmtiAddrLocationMap* map) NOT_JVMTI_RETURN;
static void post_dynamic_code_generated(JvmtiEnv* env, const char *name, const void *code_begin,
const void *code_end) KERNEL_RETURN;
const void *code_end) NOT_JVMTI_RETURN;
// The RedefineClasses() API breaks some invariants in the "regular"
// system. For example, there are sanity checks when GC'ing nmethods
@ -178,9 +186,8 @@ class JvmtiExport : public AllStatic {
static bool _has_redefined_a_class;
friend class VM_RedefineClasses;
inline static void set_has_redefined_a_class() {
_has_redefined_a_class = true;
JVMTI_ONLY(_has_redefined_a_class = true;)
}
// Flag to indicate if the compiler has recorded all dependencies. When the
// can_redefine_classes capability is enabled in the OnLoad phase then the compiler
// records all dependencies from startup. However if the capability is first
@ -191,7 +198,8 @@ class JvmtiExport : public AllStatic {
public:
inline static bool has_redefined_a_class() {
return _has_redefined_a_class;
JVMTI_ONLY(return _has_redefined_a_class);
NOT_JVMTI(return false);
}
inline static bool all_dependencies_are_recorded() {
@ -204,120 +212,141 @@ class JvmtiExport : public AllStatic {
// let JVMTI know that the JVM_OnLoad code is running
static void enter_onload_phase();
static void enter_onload_phase() NOT_JVMTI_RETURN;
// let JVMTI know that the VM isn't up yet (and JVM_OnLoad code isn't running)
static void enter_primordial_phase();
static void enter_primordial_phase() NOT_JVMTI_RETURN;
// let JVMTI know that the VM isn't up yet but JNI is live
static void enter_start_phase();
static void enter_start_phase() NOT_JVMTI_RETURN;
// let JVMTI know that the VM is fully up and running now
static void enter_live_phase();
static void enter_live_phase() NOT_JVMTI_RETURN;
// ------ can_* conditions (below) are set at OnLoad and never changed ------------
inline static bool can_modify_any_class() { return _can_modify_any_class; }
inline static bool can_access_local_variables() { return _can_access_local_variables; }
inline static bool can_hotswap_or_post_breakpoint() { return _can_hotswap_or_post_breakpoint; }
inline static bool can_walk_any_space() { return _can_walk_any_space; }
inline static bool can_modify_any_class() {
JVMTI_ONLY(return _can_modify_any_class);
NOT_JVMTI(return false);
}
inline static bool can_access_local_variables() {
JVMTI_ONLY(return _can_access_local_variables);
NOT_JVMTI(return false);
}
inline static bool can_hotswap_or_post_breakpoint() {
JVMTI_ONLY(return _can_hotswap_or_post_breakpoint);
NOT_JVMTI(return false);
}
inline static bool can_walk_any_space() {
JVMTI_ONLY(return _can_walk_any_space);
NOT_JVMTI(return false);
}
// field access management
static address get_field_access_count_addr();
static address get_field_access_count_addr() NOT_JVMTI_RETURN_(0);
// field modification management
static address get_field_modification_count_addr();
static address get_field_modification_count_addr() NOT_JVMTI_RETURN_(0);
// -----------------
static bool is_jvmti_version(jint version) { return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE; }
static bool is_jvmdi_version(jint version) { return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE; }
static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version);
static bool is_jvmti_version(jint version) {
JVMTI_ONLY(return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE);
NOT_JVMTI(return false);
}
static bool is_jvmdi_version(jint version) {
JVMTI_ONLY(return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE);
NOT_JVMTI(return false);
}
static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version) NOT_JVMTI_RETURN_(0);
static void decode_version_values(jint version, int * major, int * minor,
int * micro);
int * micro) NOT_JVMTI_RETURN;
// single stepping management methods
static void at_single_stepping_point(JavaThread *thread, Method* method, address location) KERNEL_RETURN;
static void expose_single_stepping(JavaThread *thread) KERNEL_RETURN;
static bool hide_single_stepping(JavaThread *thread) KERNEL_RETURN_(false);
static void at_single_stepping_point(JavaThread *thread, Method* method, address location) NOT_JVMTI_RETURN;
static void expose_single_stepping(JavaThread *thread) NOT_JVMTI_RETURN;
static bool hide_single_stepping(JavaThread *thread) NOT_JVMTI_RETURN_(false);
// Methods that notify the debugger that something interesting has happened in the VM.
static void post_vm_start ();
static void post_vm_initialized ();
static void post_vm_death ();
static void post_vm_start () NOT_JVMTI_RETURN;
static void post_vm_initialized () NOT_JVMTI_RETURN;
static void post_vm_death () NOT_JVMTI_RETURN;
static void post_single_step (JavaThread *thread, Method* method, address location) KERNEL_RETURN;
static void post_raw_breakpoint (JavaThread *thread, Method* method, address location) KERNEL_RETURN;
static void post_single_step (JavaThread *thread, Method* method, address location) NOT_JVMTI_RETURN;
static void post_raw_breakpoint (JavaThread *thread, Method* method, address location) NOT_JVMTI_RETURN;
static void post_exception_throw (JavaThread *thread, Method* method, address location, oop exception) KERNEL_RETURN;
static void notice_unwind_due_to_exception (JavaThread *thread, Method* method, address location, oop exception, bool in_handler_frame) KERNEL_RETURN;
static void post_exception_throw (JavaThread *thread, Method* method, address location, oop exception) NOT_JVMTI_RETURN;
static void notice_unwind_due_to_exception (JavaThread *thread, Method* method, address location, oop exception, bool in_handler_frame) NOT_JVMTI_RETURN;
static oop jni_GetField_probe (JavaThread *thread, jobject jobj,
oop obj, Klass* klass, jfieldID fieldID, bool is_static)
KERNEL_RETURN_(NULL);
NOT_JVMTI_RETURN_(NULL);
static oop jni_GetField_probe_nh (JavaThread *thread, jobject jobj,
oop obj, Klass* klass, jfieldID fieldID, bool is_static)
KERNEL_RETURN_(NULL);
NOT_JVMTI_RETURN_(NULL);
static void post_field_access_by_jni (JavaThread *thread, oop obj,
Klass* klass, jfieldID fieldID, bool is_static) KERNEL_RETURN;
Klass* klass, jfieldID fieldID, bool is_static) NOT_JVMTI_RETURN;
static void post_field_access (JavaThread *thread, Method* method,
address location, KlassHandle field_klass, Handle object, jfieldID field) KERNEL_RETURN;
address location, KlassHandle field_klass, Handle object, jfieldID field) NOT_JVMTI_RETURN;
static oop jni_SetField_probe (JavaThread *thread, jobject jobj,
oop obj, Klass* klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value) KERNEL_RETURN_(NULL);
jvalue *value) NOT_JVMTI_RETURN_(NULL);
static oop jni_SetField_probe_nh (JavaThread *thread, jobject jobj,
oop obj, Klass* klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value) KERNEL_RETURN_(NULL);
jvalue *value) NOT_JVMTI_RETURN_(NULL);
static void post_field_modification_by_jni(JavaThread *thread, oop obj,
Klass* klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value);
static void post_raw_field_modification(JavaThread *thread, Method* method,
address location, KlassHandle field_klass, Handle object, jfieldID field,
char sig_type, jvalue *value) KERNEL_RETURN;
char sig_type, jvalue *value) NOT_JVMTI_RETURN;
static void post_method_entry (JavaThread *thread, Method* method, frame current_frame) KERNEL_RETURN;
static void post_method_exit (JavaThread *thread, Method* method, frame current_frame) KERNEL_RETURN;
static void post_method_entry (JavaThread *thread, Method* method, frame current_frame) NOT_JVMTI_RETURN;
static void post_method_exit (JavaThread *thread, Method* method, frame current_frame) NOT_JVMTI_RETURN;
static void post_class_load (JavaThread *thread, Klass* klass) KERNEL_RETURN;
static void post_class_unload (Klass* klass) KERNEL_RETURN;
static void post_class_prepare (JavaThread *thread, Klass* klass) KERNEL_RETURN;
static void post_class_load (JavaThread *thread, Klass* klass) NOT_JVMTI_RETURN;
static void post_class_unload (Klass* klass) NOT_JVMTI_RETURN;
static void post_class_prepare (JavaThread *thread, Klass* klass) NOT_JVMTI_RETURN;
static void post_thread_start (JavaThread *thread) KERNEL_RETURN;
static void post_thread_end (JavaThread *thread) KERNEL_RETURN;
static void post_thread_start (JavaThread *thread) NOT_JVMTI_RETURN;
static void post_thread_end (JavaThread *thread) NOT_JVMTI_RETURN;
// Support for java.lang.instrument agent loading.
static bool _should_post_class_file_load_hook;
inline static void set_should_post_class_file_load_hook(bool on) { _should_post_class_file_load_hook = on; }
inline static bool should_post_class_file_load_hook() { return _should_post_class_file_load_hook; }
inline static bool should_post_class_file_load_hook() {
JVMTI_ONLY(return _should_post_class_file_load_hook);
NOT_JVMTI(return false;)
}
static void post_class_file_load_hook(Symbol* h_name, Handle class_loader,
Handle h_protection_domain,
unsigned char **data_ptr, unsigned char **end_ptr,
unsigned char **cached_data_ptr,
jint *cached_length_ptr);
static void post_native_method_bind(Method* method, address* function_ptr) KERNEL_RETURN;
static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
jint *cached_length_ptr) NOT_JVMTI_RETURN;
static void post_native_method_bind(Method* method, address* function_ptr) NOT_JVMTI_RETURN;
static void post_compiled_method_load(nmethod *nm) NOT_JVMTI_RETURN;
static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) NOT_JVMTI_RETURN;
// used to post a CompiledMethodUnload event
static void post_compiled_method_unload(jmethodID mid, const void *code_begin) KERNEL_RETURN;
static void post_compiled_method_unload(jmethodID mid, const void *code_begin) NOT_JVMTI_RETURN;
// similiar to post_dynamic_code_generated except that it can be used to
// post a DynamicCodeGenerated event while holding locks in the VM. Any event
// posted using this function is recorded by the enclosing event collector
// -- JvmtiDynamicCodeEventCollector.
static void post_dynamic_code_generated_while_holding_locks(const char* name, address code_begin, address code_end) KERNEL_RETURN;
static void post_dynamic_code_generated_while_holding_locks(const char* name, address code_begin, address code_end) NOT_JVMTI_RETURN;
static void post_garbage_collection_finish() KERNEL_RETURN;
static void post_garbage_collection_start() KERNEL_RETURN;
static void post_data_dump() KERNEL_RETURN;
static void post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) KERNEL_RETURN;
static void post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) KERNEL_RETURN;
static void post_monitor_wait(JavaThread *thread, oop obj, jlong timeout) KERNEL_RETURN;
static void post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) KERNEL_RETURN;
static void post_object_free(JvmtiEnv* env, jlong tag) KERNEL_RETURN;
static void post_resource_exhausted(jint resource_exhausted_flags, const char* detail) KERNEL_RETURN;
static void record_vm_internal_object_allocation(oop object) KERNEL_RETURN;
static void post_garbage_collection_finish() NOT_JVMTI_RETURN;
static void post_garbage_collection_start() NOT_JVMTI_RETURN;
static void post_data_dump() NOT_JVMTI_RETURN;
static void post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) NOT_JVMTI_RETURN;
static void post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) NOT_JVMTI_RETURN;
static void post_monitor_wait(JavaThread *thread, oop obj, jlong timeout) NOT_JVMTI_RETURN;
static void post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) NOT_JVMTI_RETURN;
static void post_object_free(JvmtiEnv* env, jlong tag) NOT_JVMTI_RETURN;
static void post_resource_exhausted(jint resource_exhausted_flags, const char* detail) NOT_JVMTI_RETURN;
static void record_vm_internal_object_allocation(oop object) NOT_JVMTI_RETURN;
// Post objects collected by vm_object_alloc_event_collector.
static void post_vm_object_alloc(JavaThread *thread, oop object) KERNEL_RETURN;
static void post_vm_object_alloc(JavaThread *thread, oop object) NOT_JVMTI_RETURN;
// Collects vm internal objects for later event posting.
inline static void vm_object_alloc_event_collector(oop object) {
if (should_post_vm_object_alloc()) {
@ -331,21 +360,19 @@ class JvmtiExport : public AllStatic {
}
}
static void cleanup_thread (JavaThread* thread) KERNEL_RETURN;
static void cleanup_thread (JavaThread* thread) NOT_JVMTI_RETURN;
static void oops_do(OopClosure* f) KERNEL_RETURN;
static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) KERNEL_RETURN;
static void gc_epilogue() KERNEL_RETURN;
static void oops_do(OopClosure* f) NOT_JVMTI_RETURN;
static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN;
static void gc_epilogue() NOT_JVMTI_RETURN;
static void transition_pending_onload_raw_monitors() KERNEL_RETURN;
static void transition_pending_onload_raw_monitors() NOT_JVMTI_RETURN;
#ifndef SERVICES_KERNEL
// attach support
static jint load_agent_library(AttachOperation* op, outputStream* out);
#endif // SERVICES_KERNEL
static jint load_agent_library(AttachOperation* op, outputStream* out) NOT_JVMTI_RETURN_(JNI_ERR);
// SetNativeMethodPrefix support
static char** get_all_native_method_prefixes(int* count_ptr);
static char** get_all_native_method_prefixes(int* count_ptr) NOT_JVMTI_RETURN_(NULL);
};
// Support class used by JvmtiDynamicCodeEventCollector and others. It
@ -408,8 +435,8 @@ class JvmtiDynamicCodeEventCollector : public JvmtiEventCollector {
void register_stub(const char* name, address start, address end);
public:
JvmtiDynamicCodeEventCollector() KERNEL_RETURN;
~JvmtiDynamicCodeEventCollector() KERNEL_RETURN;
JvmtiDynamicCodeEventCollector() NOT_JVMTI_RETURN;
~JvmtiDynamicCodeEventCollector() NOT_JVMTI_RETURN;
bool is_dynamic_code_event() { return true; }
};
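The comment on post_dynamic_code_generated_while_holding_locks above, together with this collector class, describes a deferral pattern: events recorded while VM locks are held are parked in a stack-allocated collector and only posted when it goes out of scope. A minimal, self-contained sketch of that pattern follows; Collector, record_event and post_event are toy names invented for the illustration, not the HotSpot API.

#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for the real event posting machinery.
static void post_event(const std::string& name) {
  std::printf("posted DynamicCodeGenerated(%s)\n", name.c_str());
}

class Collector {                       // analogous role to JvmtiDynamicCodeEventCollector
  std::vector<std::string> _deferred;
  static Collector* _current;
 public:
  Collector()  { _current = this; }
  ~Collector() {                        // destructor runs after the locks are released
    _current = nullptr;
    for (const std::string& e : _deferred) post_event(e);
  }
  static void record_event(const std::string& name) {
    if (_current != nullptr) _current->_deferred.push_back(name);  // defer while "locked"
    else                     post_event(name);                     // otherwise post immediately
  }
};
Collector* Collector::_current = nullptr;

int main() {
  {
    Collector c;                        // set up before entering the critical section
    Collector::record_event("stub_a");  // recorded, not posted
    Collector::record_event("stub_b");
  }                                     // posted here, outside the critical section
  return 0;
}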
@ -441,8 +468,8 @@ class JvmtiVMObjectAllocEventCollector : public JvmtiEventCollector {
static void oops_do_for_all_threads(OopClosure* f);
public:
JvmtiVMObjectAllocEventCollector() KERNEL_RETURN;
~JvmtiVMObjectAllocEventCollector() KERNEL_RETURN;
JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN;
~JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN;
bool is_vm_object_alloc_event() { return true; }
bool is_enabled() { return _enable; }
@ -472,16 +499,16 @@ class NoJvmtiVMObjectAllocMark : public StackObj {
bool was_enabled() { return _collector != NULL; }
public:
NoJvmtiVMObjectAllocMark() KERNEL_RETURN;
~NoJvmtiVMObjectAllocMark() KERNEL_RETURN;
NoJvmtiVMObjectAllocMark() NOT_JVMTI_RETURN;
~NoJvmtiVMObjectAllocMark() NOT_JVMTI_RETURN;
};
// Base class for reporting GC events to JVMTI.
class JvmtiGCMarker : public StackObj {
public:
JvmtiGCMarker() KERNEL_RETURN;
~JvmtiGCMarker() KERNEL_RETURN;
JvmtiGCMarker() NOT_JVMTI_RETURN;
~JvmtiGCMarker() NOT_JVMTI_RETURN;
};
// JvmtiHideSingleStepping is a helper class for hiding

@ -481,15 +481,15 @@ class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
// Factory methods
static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
KERNEL_RETURN_(JvmtiDeferredEvent());
NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
jmethodID id, const void* code) NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
static JvmtiDeferredEvent dynamic_code_generated_event(
const char* name, const void* begin, const void* end)
KERNEL_RETURN_(JvmtiDeferredEvent());
NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
// Actually posts the event.
void post() KERNEL_RETURN;
void post() NOT_JVMTI_RETURN;
};
/**
@ -520,13 +520,13 @@ class JvmtiDeferredEventQueue : AllStatic {
static volatile QueueNode* _pending_list; // Uses CAS for read/update
// Transfers events from the _pending_list to the _queue.
static void process_pending_events() KERNEL_RETURN;
static void process_pending_events() NOT_JVMTI_RETURN;
public:
// Must be holding Service_lock when calling these
static bool has_events() KERNEL_RETURN_(false);
static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN;
static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent());
static bool has_events() NOT_JVMTI_RETURN_(false);
static void enqueue(const JvmtiDeferredEvent& event) NOT_JVMTI_RETURN;
static JvmtiDeferredEvent dequeue() NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
// Used to enqueue events without using a lock, for times (such as during
// safepoint) when we can't or don't want to lock the Service_lock.
@ -534,7 +534,7 @@ class JvmtiDeferredEventQueue : AllStatic {
// Events will be held off to the side until there's a call to
// dequeue(), enqueue(), or process_pending_events() (all of which require
// the holding of the Service_lock), and will be enqueued at that time.
static void add_pending_event(const JvmtiDeferredEvent&) KERNEL_RETURN;
static void add_pending_event(const JvmtiDeferredEvent&) NOT_JVMTI_RETURN;
};
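The comment above describes two paths into the deferred-event queue: a lock-free pending list appended with CAS (for contexts such as safepoints where Service_lock cannot be taken), which is folded into the ordered queue the next time a caller holds the lock. A self-contained sketch of that shape, using std::atomic and std::mutex as stand-ins for the HotSpot primitives; PendingQueue and drain_pending_locked are illustrative names only.

#include <atomic>
#include <cstdio>
#include <mutex>
#include <queue>

struct Event { int id; Event* next = nullptr; };

class PendingQueue {
  std::mutex _lock;                       // stand-in for Service_lock
  std::queue<int> _queue;                 // the ordered queue, guarded by _lock
  std::atomic<Event*> _pending{nullptr};  // lock-free side list

  void drain_pending_locked() {           // caller must hold _lock
    Event* head = _pending.exchange(nullptr);
    while (head != nullptr) {             // note: the side list drains in LIFO order here
      _queue.push(head->id);
      Event* next = head->next;
      delete head;
      head = next;
    }
  }
 public:
  // Safe to call without the lock (e.g. from a context that must not block).
  void add_pending(int id) {
    Event* e = new Event{id};
    e->next = _pending.load(std::memory_order_relaxed);
    while (!_pending.compare_exchange_weak(e->next, e)) { /* retry; e->next was refreshed */ }
  }
  // Normal path: take the lock, fold in pending events, then enqueue/dequeue.
  void enqueue(int id) {
    std::lock_guard<std::mutex> g(_lock);
    drain_pending_locked();
    _queue.push(id);
  }
  bool dequeue(int* out) {
    std::lock_guard<std::mutex> g(_lock);
    drain_pending_locked();
    if (_queue.empty()) return false;
    *out = _queue.front();
    _queue.pop();
    return true;
  }
};

int main() {
  PendingQueue q;
  q.add_pending(1);   // no lock taken
  q.enqueue(2);       // pending event folded in under the lock
  int id;
  while (q.dequeue(&id)) std::printf("event %d\n", id);
  return 0;
}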
// Utility macro that checks for NULL pointers:

@ -495,9 +495,9 @@ class VM_RedefineClasses: public VM_Operation {
class MetadataOnStackMark : public StackObj {
NOT_PRODUCT(static bool _is_active;)
public:
MetadataOnStackMark();
~MetadataOnStackMark();
static void record(Metadata* m);
MetadataOnStackMark() NOT_JVMTI_RETURN;
~MetadataOnStackMark() NOT_JVMTI_RETURN;
static void record(Metadata* m) NOT_JVMTI_RETURN;
};
#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP

@ -2862,9 +2862,8 @@ inline bool VM_HeapWalkOperation::iterate_over_class(oop java_class) {
oop entry;
if (tag.is_string()) {
entry = pool->resolved_string_at(i);
// If the entry is non-null it it resolved.
// If the entry is non-null it is resolved.
if (entry == NULL) continue;
assert(java_lang_String::is_instance(entry), "must be string");
} else {
entry = Klass::cast(pool->resolved_klass_at(i))->java_mirror();
}

@ -125,7 +125,7 @@ class JvmtiTagMap : public CHeapObj<mtInternal> {
jlong** tag_result_ptr);
static void weak_oops_do(
BoolObjectClosure* is_alive, OopClosure* f) KERNEL_RETURN;
BoolObjectClosure* is_alive, OopClosure* f) NOT_JVMTI_RETURN;
};
#endif // SHARE_VM_PRIMS_JVMTITAGMAP_HPP

@ -391,7 +391,7 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
static ByteSize earlyret_oop_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_oop); }
static ByteSize earlyret_value_offset() { return byte_offset_of(JvmtiThreadState, _earlyret_value); }
void oops_do(OopClosure* f); // GC support
void oops_do(OopClosure* f) NOT_JVMTI_RETURN; // GC support
public:
void set_should_post_on_exceptions(bool val) { _thread->set_should_post_on_exceptions_flag(val ? JNI_TRUE : JNI_FALSE); }

@ -328,6 +328,7 @@ address NativeLookup::lookup_critical_entry(methodHandle method) {
// native implementation again.
// See SetNativeMethodPrefix in the JVM TI Spec for more details.
address NativeLookup::lookup_entry_prefixed(methodHandle method, bool& in_base_library, TRAPS) {
#if INCLUDE_JVMTI
ResourceMark rm(THREAD);
int prefix_count;
@ -358,6 +359,7 @@ address NativeLookup::lookup_entry_prefixed(methodHandle method, bool& in_base_l
}
}
}
#endif // INCLUDE_JVMTI
return NULL;
}

@ -1066,7 +1066,7 @@ void Arguments::set_tiered_flags() {
}
}
#ifndef KERNEL
#if INCLUDE_ALTERNATE_GCS
static void disable_adaptive_size_policy(const char* collector_name) {
if (UseAdaptiveSizePolicy) {
if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
@ -1141,7 +1141,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
FLAG_SET_ERGO(bool, UseParNewGC, true);
}
// Turn off AdaptiveSizePolicy for CMS until it is complete.
// Turn off AdaptiveSizePolicy by default for cms until it is complete.
disable_adaptive_size_policy("UseConcMarkSweepGC");
// In either case, adjust ParallelGCThreads and/or UseParNewGC
@ -1283,7 +1283,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
}
}
#endif // KERNEL
#endif // INCLUDE_ALTERNATE_GCS
void set_object_alignment() {
// Object alignment.
@ -1300,10 +1300,10 @@ void set_object_alignment() {
// Oop encoding heap max
OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
#ifndef KERNEL
#if INCLUDE_ALTERNATE_GCS
// Set CMS global values
CompactibleFreeListSpace::set_cms_values();
#endif // KERNEL
#endif // INCLUDE_ALTERNATE_GCS
}
bool verify_object_alignment() {
@ -1991,9 +1991,15 @@ bool Arguments::check_vm_args_consistency() {
}
#endif // SPARC
if (PrintNMTStatistics && MemTracker::tracking_level() == MemTracker::NMT_off) {
warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
PrintNMTStatistics = false;
if (PrintNMTStatistics) {
#if INCLUDE_NMT
if (MemTracker::tracking_level() == MemTracker::NMT_off) {
#endif // INCLUDE_NMT
warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
PrintNMTStatistics = false;
#if INCLUDE_NMT
}
#endif
}
return status;
@ -2220,12 +2226,12 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
size_t len2 = strlen(pos+1) + 1; // options start after ':'. Final zero must be copied.
options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2);
}
#ifdef JVMTI_KERNEL
#if !INCLUDE_JVMTI
if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
warning("profiling and debugging agents are not supported with Kernel VM");
warning("profiling and debugging agents are not supported in this VM");
} else
#endif // JVMTI_KERNEL
add_init_library(name, options);
#endif // !INCLUDE_JVMTI
add_init_library(name, options);
}
// -agentlib and -agentpath
} else if (match_option(option, "-agentlib:", &tail) ||
@ -2240,20 +2246,24 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
if(pos != NULL) {
options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1);
}
#ifdef JVMTI_KERNEL
#if !INCLUDE_JVMTI
if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) {
warning("profiling and debugging agents are not supported with Kernel VM");
warning("profiling and debugging agents are not supported in this VM");
} else
#endif // JVMTI_KERNEL
#endif // !INCLUDE_JVMTI
add_init_agent(name, options, is_absolute_path);
}
// -javaagent
} else if (match_option(option, "-javaagent:", &tail)) {
#if !INCLUDE_JVMTI
warning("Instrumentation agents are not supported in this VM");
#else
if(tail != NULL) {
char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtInternal), tail);
add_init_agent("instrument", options, false);
}
#endif // !INCLUDE_JVMTI
// -Xnoclassgc
} else if (match_option(option, "-Xnoclassgc", &tail)) {
FLAG_SET_CMDLINE(bool, ClassUnloading, false);
@ -2385,12 +2395,12 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// EVM option, ignore silently for compatibility
// -Xprof
} else if (match_option(option, "-Xprof", &tail)) {
#ifndef FPROF_KERNEL
#if INCLUDE_FPROF
_has_profile = true;
#else // FPROF_KERNEL
#else // INCLUDE_FPROF
// do we have to exit?
warning("Kernel VM does not support flat profiling.");
#endif // FPROF_KERNEL
warning("Flat profiling is not supported in this VM.");
#endif // INCLUDE_FPROF
// -Xaprof
} else if (match_option(option, "-Xaprof", &tail)) {
_has_alloc_profile = true;
@ -2438,6 +2448,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
#if defined(KERNEL)
vm_exit_during_initialization(
"Dumping a shared archive is not supported on the Kernel JVM.", NULL);
#elif !INCLUDE_CDS
vm_exit_during_initialization(
"Dumping a shared archive is not supported in this VM.", NULL);
#else
FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
set_mode_flags(_int); // Prevent compilation, which creates objects
@ -2490,7 +2503,11 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
// JNI hooks
} else if (match_option(option, "-Xcheck", &tail)) {
if (!strcmp(tail, ":jni")) {
#if !INCLUDE_JNI_CHECK
warning("JNI CHECKING is not supported in this VM");
#else
CheckJNICalls = true;
#endif // INCLUDE_JNI_CHECK
} else if (is_bad_option(option, args->ignoreUnrecognized,
"check")) {
return JNI_EINVAL;
@ -3045,7 +3062,11 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
vm_exit(0);
}
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
#if INCLUDE_NMT
MemTracker::init_tracking_options(tail);
#else
warning("Native Memory Tracking is not supported in this VM");
#endif
}
@ -3108,6 +3129,21 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
UNSUPPORTED_OPTION(UseG1GC, "G1 GC");
#endif
#if !INCLUDE_ALTERNATE_GCS
if (UseParallelGC) {
warning("Parallel GC is not supported in this VM. Using Serial GC.");
}
if (UseParallelOldGC) {
warning("Parallel Old GC is not supported in this VM. Using Serial GC.");
}
if (UseConcMarkSweepGC) {
warning("Concurrent Mark Sweep GC is not supported in this VM. Using Serial GC.");
}
if (UseParNewGC) {
warning("Par New GC is not supported in this VM. Using Serial GC.");
}
#endif // INCLUDE_ALTERNATE_GCS
#ifndef PRODUCT
if (TraceBytecodesAt != 0) {
TraceBytecodes = true;
@ -3156,9 +3192,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
#ifdef SERIALGC
force_serial_gc();
#endif // SERIALGC
#ifdef KERNEL
#if !INCLUDE_CDS
no_shared_spaces();
#endif // KERNEL
#endif // INCLUDE_CDS
// Set flags based on ergonomics.
set_ergonomics_flags();
@ -3180,9 +3216,10 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
}
#ifndef KERNEL
// Set heap size based on available physical memory
set_heap_size();
#if INCLUDE_ALTERNATE_GCS
// Set per-collector flags
if (UseParallelGC || UseParallelOldGC) {
set_parallel_gc_flags();
@ -3193,7 +3230,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
} else if (UseG1GC) {
set_g1_gc_flags();
}
#endif // KERNEL
#endif // INCLUDE_ALTERNATE_GCS
#ifdef SERIALGC
assert(verify_serial_gc_flags(), "SerialGC unset");

@ -65,15 +65,15 @@ public:
// For now, the only thread-specific region is the class loader.
enum Region { noRegion, classLoaderRegion, extraRegion, maxRegion };
ThreadProfilerMark(Region) KERNEL_RETURN;
~ThreadProfilerMark() KERNEL_RETURN;
ThreadProfilerMark(Region) NOT_FPROF_RETURN;
~ThreadProfilerMark() NOT_FPROF_RETURN;
private:
ThreadProfiler* _pp;
Region _r;
};
#ifndef FPROF_KERNEL
#if INCLUDE_FPROF
class IntervalData VALUE_OBJ_CLASS_SPEC {
// Just to keep these things all together
@ -119,29 +119,29 @@ public:
static void print_header(outputStream* st);
void print_data(outputStream* st);
};
#endif // FPROF_KERNEL
#endif // INCLUDE_FPROF
class ThreadProfiler: public CHeapObj<mtInternal> {
public:
ThreadProfiler() KERNEL_RETURN;
~ThreadProfiler() KERNEL_RETURN;
ThreadProfiler() NOT_FPROF_RETURN;
~ThreadProfiler() NOT_FPROF_RETURN;
// Resets the profiler
void reset() KERNEL_RETURN;
void reset() NOT_FPROF_RETURN;
// Activates the profiler for a certain thread
void engage() KERNEL_RETURN;
void engage() NOT_FPROF_RETURN;
// Deactivates the profiler
void disengage() KERNEL_RETURN;
void disengage() NOT_FPROF_RETURN;
// Prints the collected profiling information
void print(const char* thread_name) KERNEL_RETURN;
void print(const char* thread_name) NOT_FPROF_RETURN;
// Garbage Collection Support
void oops_do(OopClosure* f) KERNEL_RETURN;
void oops_do(OopClosure* f) NOT_FPROF_RETURN;
#ifndef FPROF_KERNEL
#if INCLUDE_FPROF
private:
// for recording ticks.
friend class ProfilerNode;
@ -225,39 +225,39 @@ private:
IntervalData* interval_data_ref() {
return &_interval_data;
}
#endif // FPROF_KERNEL
#endif // INCLUDE_FPROF
};
class FlatProfiler: AllStatic {
public:
static void reset() KERNEL_RETURN ;
static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
static void disengage() KERNEL_RETURN ;
static void print(int unused) KERNEL_RETURN ;
static bool is_active() KERNEL_RETURN_(false) ;
static void reset() NOT_FPROF_RETURN ;
static void engage(JavaThread* mainThread, bool fullProfile) NOT_FPROF_RETURN ;
static void disengage() NOT_FPROF_RETURN ;
static void print(int unused) NOT_FPROF_RETURN ;
static bool is_active() NOT_FPROF_RETURN_(false) ;
// This is NULL if each thread has its own thread profiler,
// else this is the single thread profiler used by all threads.
// In particular it makes a difference during garbage collection,
// where you only want to traverse each thread profiler once.
static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);
static ThreadProfiler* get_thread_profiler() NOT_FPROF_RETURN_(NULL);
// Garbage Collection Support
static void oops_do(OopClosure* f) KERNEL_RETURN ;
static void oops_do(OopClosure* f) NOT_FPROF_RETURN ;
// Support for disassembler to inspect the PCRecorder
// Returns the start address for a given pc
// NULL is returned if the PCRecorder is inactive
static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);
static address bucket_start_for(address pc) NOT_FPROF_RETURN_(NULL);
enum { MillisecsPerTick = 10 }; // ms per profiling ticks
// Returns the number of ticks recorded for the bucket
// pc belongs to.
static int bucket_count_for(address pc) KERNEL_RETURN_(0);
static int bucket_count_for(address pc) NOT_FPROF_RETURN_(0);
#ifndef FPROF_KERNEL
#if INCLUDE_FPROF
private:
static bool full_profile() {
@ -324,7 +324,7 @@ public:
static void interval_reset(); // reset interval data.
enum {interval_print_size = 10};
static IntervalData* interval_data;
#endif // FPROF_KERNEL
#endif // INCLUDE_FPROF
};
#endif // SHARE_VM_RUNTIME_FPROFILER_HPP
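The get_thread_profiler() comment above encodes an ownership rule: either a single ThreadProfiler is shared by all threads or each thread owns its own, and a GC walk must visit each profiler exactly once. A toy sketch of that rule; Profiler, ThreadInfo and visit_all are made-up names, not the HotSpot types.

#include <cstdio>
#include <vector>

struct Profiler   { void oops_do() { std::printf("visiting profiler %p\n", (void*)this); } };
struct ThreadInfo { Profiler* profiler; };

// Non-null means one profiler is shared by every thread.
static Profiler* shared_profiler = nullptr;

void visit_all(std::vector<ThreadInfo>& threads) {
  if (shared_profiler != nullptr) {
    shared_profiler->oops_do();                              // shared: visit exactly once
  } else {
    for (ThreadInfo& t : threads) {
      if (t.profiler != nullptr) t.profiler->oops_do();      // per-thread: once each
    }
  }
}

int main() {
  Profiler a, b;
  std::vector<ThreadInfo> threads = { {&a}, {&b} };
  visit_all(threads);          // per-thread mode: visits a and b
  shared_profiler = &a;
  visit_all(threads);          // shared mode: visits a only once
  return 0;
}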

@ -94,9 +94,9 @@
typedef enum {
RUNTIME_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER, RUNTIME_LP64_PRODUCT_FLAG_MEMBER)
RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER)
#ifndef KERNEL
#if INCLUDE_ALTERNATE_GCS
G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
#endif
#endif // INCLUDE_ALTERNATE_GCS
#ifdef COMPILER1
C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
#endif
@ -187,7 +187,7 @@ typedef enum {
RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
#ifndef KERNEL
#if INCLUDE_ALTERNATE_GCS
G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE,
RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE,
@ -197,7 +197,7 @@ typedef enum {
RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE,
RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE,
RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE)
#endif // KERNEL
#endif // INCLUDE_ALTERNATE_GCS
#ifdef COMPILER1
C1_FLAGS(C1_DEVELOP_FLAG_MEMBER_WITH_TYPE,
C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,

@ -114,9 +114,9 @@ jint init_globals() {
universe2_init(); // dependent on codeCache_init and stubRoutines_init1
referenceProcessor_init();
jni_handles_init();
#ifndef VM_STRUCTS_KERNEL
#if INCLUDE_VM_STRUCTS
vmStructs_init();
#endif // VM_STRUCTS_KERNEL
#endif // INCLUDE_VM_STRUCTS
vtableStubs_init();
InlineCacheBuffer_init();

@ -588,6 +588,10 @@ bool PerfDataList::by_name(void* name, PerfData* pd) {
PerfData* PerfDataList::find_by_name(const char* name) {
// if add_item hasn't been called the list won't be initialized
if (this == NULL)
return NULL;
int i = _set->find((void*)name, PerfDataList::by_name);
if (i >= 0 && i <= _set->length())

@ -321,12 +321,14 @@ void Thread::record_stack_base_and_size() {
// set up any platform-specific state.
os::initialize_thread(this);
#if INCLUDE_NMT
// record thread's native stack, stack grows downward
if (MemTracker::is_on()) {
address stack_low_addr = stack_base() - stack_size();
MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
CURRENT_PC);
}
#endif // INCLUDE_NMT
}
@ -338,10 +340,12 @@ Thread::~Thread() {
// record_stack_base_and_size called. Although we would like to ensure
// that all started threads do call record_stack_base_and_size(), there is
// no proper way to enforce that.
#if INCLUDE_NMT
if (_stack_base != NULL) {
address low_stack_addr = stack_base() - stack_size();
MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
}
#endif // INCLUDE_NMT
// deallocate data structures
delete resource_area();
@ -1357,7 +1361,9 @@ void JavaThread::initialize() {
set_monitor_chunks(NULL);
set_next(NULL);
set_thread_state(_thread_new);
#if INCLUDE_NMT
set_recorder(NULL);
#endif
_terminated = _not_terminated;
_privileged_stack_top = NULL;
_array_for_gc = NULL;
@ -3523,7 +3529,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
#endif /* USDT2 */
// record VM initialization completion time
#if INCLUDE_MANAGEMENT
Management::record_vm_init_completed();
#endif // INCLUDE_MANAGEMENT
// Compute system loader. Note that this has to occur after set_init_completed, since
// valid exceptions may be thrown in the process.
@ -3584,9 +3592,14 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
}
// initialize compiler(s)
#if defined(COMPILER1) || defined(COMPILER2)
CompileBroker::compilation_init();
#endif
#if INCLUDE_MANAGEMENT
Management::initialize(THREAD);
#endif // INCLUDE_MANAGEMENT
if (HAS_PENDING_EXCEPTION) {
// management agent fails to start possibly due to
// configuration problem and is responsible for printing
@ -3756,6 +3769,7 @@ void Threads::create_vm_init_agents() {
AgentLibrary* agent;
JvmtiExport::enter_onload_phase();
for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
OnLoadEntry_t on_load_entry = lookup_agent_on_load(agent);

@ -41,7 +41,11 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "runtime/unhandledOops.hpp"
#if INCLUDE_NMT
#include "services/memRecorder.hpp"
#endif // INCLUDE_NMT
#include "trace/tracing.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/top.hpp"
@ -1038,6 +1042,7 @@ class JavaThread: public Thread {
bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
#if INCLUDE_NMT
// native memory tracking
inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; }
inline void set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; }
@ -1045,6 +1050,7 @@ class JavaThread: public Thread {
private:
// per-thread memory recorder
volatile MemRecorder* _recorder;
#endif // INCLUDE_NMT
// Suspend/resume support for JavaThread
private:

@ -230,6 +230,15 @@ static inline uint64_t cast_uint64_t(size_t x)
return x;
}
#if INCLUDE_JVMTI
#define JVMTI_STRUCTS(static_field) \
static_field(JvmtiExport, _can_access_local_variables, bool) \
static_field(JvmtiExport, _can_hotswap_or_post_breakpoint, bool) \
static_field(JvmtiExport, _can_post_on_exceptions, bool) \
static_field(JvmtiExport, _can_walk_any_space, bool)
#else
#define JVMTI_STRUCTS(static_field)
#endif // INCLUDE_JVMTI
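JVMTI_STRUCTS above is a conditional X-macro: the caller passes in the entry-generating macro (static_field), and the whole group of entries disappears when INCLUDE_JVMTI is 0. A compilable toy showing the same shape; FEATURE_STRUCTS, PRINT_FIELD, INCLUDE_FEATURE and the Exporter struct are invented for the example, not part of HotSpot.

#include <cstdio>

#ifndef INCLUDE_FEATURE
#define INCLUDE_FEATURE 1        // build with -DINCLUDE_FEATURE=0 to drop the entries
#endif

struct Exporter { static bool _enabled; };
bool Exporter::_enabled = true;

#if INCLUDE_FEATURE
#define FEATURE_STRUCTS(entry) \
  entry(Exporter, _enabled, bool)
#else
#define FEATURE_STRUCTS(entry)   /* expands to nothing */
#endif

// One possible entry generator: print a description of each field.
#define PRINT_FIELD(klass, field, type) \
  std::printf("%s::%s : %s\n", #klass, #field, #type);

int main() {
  FEATURE_STRUCTS(PRINT_FIELD)   // zero or more lines, decided at compile time
  return 0;
}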
typedef HashtableEntry<intptr_t, mtInternal> IntptrHashtableEntry;
typedef Hashtable<intptr_t, mtInternal> IntptrHashtable;
@ -1170,10 +1179,7 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
/* JVMTI */ \
/*************************/ \
\
static_field(JvmtiExport, _can_access_local_variables, bool) \
static_field(JvmtiExport, _can_hotswap_or_post_breakpoint, bool) \
static_field(JvmtiExport, _can_post_on_exceptions, bool) \
static_field(JvmtiExport, _can_walk_any_space, bool) \
JVMTI_STRUCTS(static_field) \
\
/*************/ \
/* Arguments */ \

@ -110,24 +110,26 @@ void Abstract_VM_Version::initialize() {
#define VMLP ""
#endif
#ifdef KERNEL
#define VMTYPE "Kernel"
#else // KERNEL
#ifdef TIERED
#define VMTYPE "Server"
#else // TIERED
#ifdef ZERO
#ifdef SHARK
#define VMTYPE "Shark"
#else // SHARK
#define VMTYPE "Zero"
#endif // SHARK
#else // ZERO
#define VMTYPE COMPILER1_PRESENT("Client") \
COMPILER2_PRESENT("Server")
#endif // ZERO
#endif // TIERED
#endif // KERNEL
#ifndef VMTYPE
#ifdef KERNEL
#define VMTYPE "Kernel"
#else // KERNEL
#ifdef TIERED
#define VMTYPE "Server"
#else // TIERED
#ifdef ZERO
#ifdef SHARK
#define VMTYPE "Shark"
#else // SHARK
#define VMTYPE "Zero"
#endif // SHARK
#else // ZERO
#define VMTYPE COMPILER1_PRESENT("Client") \
COMPILER2_PRESENT("Server")
#endif // ZERO
#endif // TIERED
#endif // KERNEL
#endif
#ifndef HOTSPOT_VM_DISTRO
#error HOTSPOT_VM_DISTRO must be defined

@ -52,21 +52,21 @@ struct AttachOperationFunctionInfo {
class AttachListener: AllStatic {
public:
static void init() KERNEL_RETURN;
static void abort() KERNEL_RETURN;
static void init() NOT_SERVICES_RETURN;
static void abort() NOT_SERVICES_RETURN;
// invoke to perform clean-up tasks when all clients detach
static void detachall() KERNEL_RETURN;
static void detachall() NOT_SERVICES_RETURN;
// indicates if the Attach Listener needs to be created at startup
static bool init_at_startup() KERNEL_RETURN_(false);
static bool init_at_startup() NOT_SERVICES_RETURN_(false);
// indicates if we have a trigger to start the Attach Listener
static bool is_init_trigger() KERNEL_RETURN_(false);
static bool is_init_trigger() NOT_SERVICES_RETURN_(false);
#ifdef SERVICES_KERNEL
#if !INCLUDE_SERVICES
static bool is_attach_supported() { return false; }
#else // SERVICES_KERNEL
#else
private:
static volatile bool _initialized;
@ -94,10 +94,10 @@ class AttachListener: AllStatic {
// dequeue the next operation
static AttachOperation* dequeue();
#endif // SERVICES_KERNEL
#endif // !INCLUDE_SERVICES
};
#ifndef SERVICES_KERNEL
#if INCLUDE_SERVICES
class AttachOperation: public CHeapObj<mtInternal> {
public:
enum {
@ -151,6 +151,6 @@ class AttachOperation: public CHeapObj<mtInternal> {
// complete operation by sending result code and any result data to the client
virtual void complete(jint result, bufferedStream* result_stream) = 0;
};
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
#endif // SHARE_VM_SERVICES_ATTACHLISTENER_HPP

@ -78,6 +78,7 @@ HS_DTRACE_PROBE_DECL4(hotspot, class__unloaded, char*, int, oop, bool);
#endif
#if INCLUDE_MANAGEMENT
// counters for classes loaded from class files
PerfCounter* ClassLoadingService::_classes_loaded_count = NULL;
PerfCounter* ClassLoadingService::_classes_unloaded_count = NULL;
@ -239,3 +240,5 @@ LoadedClassesEnumerator::LoadedClassesEnumerator(Thread* cur_thread) {
// FIXME: Exclude array klasses for now
// Universe::basic_type_classes_do(&add_loaded_class);
}
#endif // INCLUDE_MANAGEMENT

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ public:
static bool get_verbose() { return TraceClassLoading; }
static bool set_verbose(bool verbose);
static void reset_trace_class_unloading();
static void reset_trace_class_unloading() NOT_MANAGEMENT_RETURN;
static jlong loaded_class_count() {
return _classes_loaded_count->get_value() + _shared_classes_loaded_count->get_value();
@ -102,13 +102,16 @@ public:
return (UsePerfData ? _class_methods_size->get_value() : -1);
}
static void notify_class_loaded(InstanceKlass* k, bool shared_class);
static void notify_class_loaded(InstanceKlass* k, bool shared_class)
NOT_MANAGEMENT_RETURN;
// All unloaded classes are non-shared
static void notify_class_unloaded(InstanceKlass* k);
static void notify_class_unloaded(InstanceKlass* k) NOT_MANAGEMENT_RETURN;
static void add_class_method_size(int size) {
#if INCLUDE_MANAGEMENT
if (UsePerfData) {
_class_methods_size->inc(size);
}
#endif // INCLUDE_MANAGEMENT
}
};

@ -43,9 +43,9 @@ void DCmdRegistrant::register_dcmds(){
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(true, false));
#ifndef SERVICES_KERNEL // Heap dumping not supported
#if INCLUDE_SERVICES // Heap dumping supported
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(true, false));
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ThreadDumpDCmd>(true, false));
@ -252,7 +252,7 @@ void RunFinalizationDCmd::execute(TRAPS) {
vmSymbols::void_method_signature(), CHECK);
}
#ifndef SERVICES_KERNEL // Heap dumping not supported
#if INCLUDE_SERVICES // Heap dumping supported
HeapDumpDCmd::HeapDumpDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),
_filename("filename","Name of the dump file", "STRING",true),
@ -292,7 +292,7 @@ int HeapDumpDCmd::num_arguments() {
return 0;
}
}
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
ClassHistogramDCmd::ClassHistogramDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),

@ -155,7 +155,7 @@ public:
virtual void execute(TRAPS);
};
#ifndef SERVICES_KERNEL // Heap dumping not supported
#if INCLUDE_SERVICES // Heap dumping supported
// See also: dump_heap in attachListener.cpp
class HeapDumpDCmd : public DCmdWithParser {
protected:
@ -176,7 +176,7 @@ public:
static int num_arguments();
virtual void execute(TRAPS);
};
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
// See also: inspectheap in attachListener.cpp
class ClassHistogramDCmd : public DCmdWithParser {

@ -76,9 +76,9 @@ class HeapDumper : public StackObj {
// returns error message (resource allocated), or NULL if no error
char* error_as_C_string() const;
static void dump_heap() KERNEL_RETURN;
static void dump_heap() NOT_SERVICES_RETURN;
static void dump_heap_from_oome() KERNEL_RETURN;
static void dump_heap_from_oome() NOT_SERVICES_RETURN;
};
#endif // SHARE_VM_SERVICES_HEAPDUMPER_HPP

@ -72,12 +72,22 @@ jmmOptionalSupport Management::_optional_support = {0};
TimeStamp Management::_stamp;
void management_init() {
#if INCLUDE_MANAGEMENT
Management::init();
ThreadService::init();
RuntimeService::init();
ClassLoadingService::init();
#else
ThreadService::init();
// Make sure the VM version is initialized
// This is normally called by RuntimeService::init().
// Since that is conditionalized out, we need to call it here.
Abstract_VM_Version::initialize();
#endif // INCLUDE_MANAGEMENT
}
#if INCLUDE_MANAGEMENT
void Management::init() {
EXCEPTION_MARK;
@ -112,10 +122,10 @@ void Management::init() {
_optional_support.isBootClassPathSupported = 1;
_optional_support.isObjectMonitorUsageSupported = 1;
#ifndef SERVICES_KERNEL
#if INCLUDE_SERVICES
// This depends on the heap inspector
_optional_support.isSynchronizerUsageSupported = 1;
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
_optional_support.isThreadAllocatedMemorySupported = 1;
// Registration of the diagnostic commands
@ -2108,7 +2118,7 @@ JVM_END
// Dump heap - Returns 0 if succeeds.
JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
#ifndef SERVICES_KERNEL
#if INCLUDE_SERVICES
ResourceMark rm(THREAD);
oop on = JNIHandles::resolve_external_guard(outputfile);
if (on == NULL) {
@ -2126,9 +2136,9 @@ JVM_ENTRY(jint, jmm_DumpHeap0(JNIEnv *env, jstring outputfile, jboolean live))
THROW_MSG_(vmSymbols::java_io_IOException(), errmsg, -1);
}
return 0;
#else // SERVICES_KERNEL
#else // INCLUDE_SERVICES
return -1;
#endif // SERVICES_KERNEL
#endif // INCLUDE_SERVICES
JVM_END
JVM_ENTRY(jobjectArray, jmm_GetDiagnosticCommands(JNIEnv *env))
@ -2295,10 +2305,13 @@ const struct jmmInterface_1_ jmm_interface = {
jmm_GetDiagnosticCommandArgumentsInfo,
jmm_ExecuteDiagnosticCommand
};
#endif // INCLUDE_MANAGEMENT
void* Management::get_jmm_interface(int version) {
#if INCLUDE_MANAGEMENT
if (version == JMM_VERSION_1_0) {
return (void*) &jmm_interface;
}
#endif // INCLUDE_MANAGEMENT
return NULL;
}

@ -58,16 +58,17 @@ public:
static void init();
static void initialize(TRAPS);
static jlong ticks_to_ms(jlong ticks);
static jlong timestamp();
static jlong ticks_to_ms(jlong ticks) NOT_MANAGEMENT_RETURN_(0L);
static jlong timestamp() NOT_MANAGEMENT_RETURN_(0L);
static void oops_do(OopClosure* f);
static void oops_do(OopClosure* f) NOT_MANAGEMENT_RETURN;
static void* get_jmm_interface(int version);
static void get_optional_support(jmmOptionalSupport* support);
static void get_loaded_classes(JavaThread* cur_thread, GrowableArray<KlassHandle>* klass_handle_array);
static void record_vm_startup_time(jlong begin, jlong duration);
static void record_vm_startup_time(jlong begin, jlong duration)
NOT_MANAGEMENT_RETURN;
static void record_vm_init_completed() {
// Initialize the timestamp to get the current time
_vm_init_done_time->set_value(os::javaTimeMillis());
@ -85,14 +86,19 @@ public:
// methods to return a Klass*.
static Klass* java_lang_management_ThreadInfo_klass(TRAPS);
static Klass* java_lang_management_MemoryUsage_klass(TRAPS);
static Klass* java_lang_management_MemoryUsage_klass(TRAPS)
NOT_MANAGEMENT_RETURN_(NULL);
static Klass* java_lang_management_MemoryPoolMXBean_klass(TRAPS);
static Klass* java_lang_management_MemoryManagerMXBean_klass(TRAPS);
static Klass* java_lang_management_GarbageCollectorMXBean_klass(TRAPS);
static Klass* sun_management_Sensor_klass(TRAPS);
static Klass* sun_management_ManagementFactory_klass(TRAPS);
static Klass* sun_management_GarbageCollectorImpl_klass(TRAPS);
static Klass* com_sun_management_GcInfo_klass(TRAPS);
static Klass* sun_management_Sensor_klass(TRAPS)
NOT_MANAGEMENT_RETURN_(NULL);
static Klass* sun_management_ManagementFactory_klass(TRAPS)
NOT_MANAGEMENT_RETURN_(NULL);
static Klass* sun_management_GarbageCollectorImpl_klass(TRAPS)
NOT_MANAGEMENT_RETURN_(NULL);
static Klass* com_sun_management_GcInfo_klass(TRAPS)
NOT_MANAGEMENT_RETURN_(NULL);
static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, TRAPS);
static instanceOop create_thread_info_instance(ThreadSnapshot* snapshot, objArrayHandle monitors_array, typeArrayHandle depths_array, objArrayHandle synchronizers_array, TRAPS);

@ -30,6 +30,8 @@
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"
#if INCLUDE_NMT
/*
* MemBaselineReporter reports data to this outputer class,
* ReportOutputer is responsible for format, store and redirect
@ -265,4 +267,6 @@ class BaselineTTYOutputer : public BaselineOutputer {
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP

@ -25,6 +25,80 @@
#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP
#include "utilities/macros.hpp"
#if !INCLUDE_NMT
#include "utilities/ostream.hpp"
class BaselineOutputer : public StackObj {
};
class BaselineTTYOutputer : public BaselineOutputer {
public:
BaselineTTYOutputer(outputStream* st) { }
};
class MemTracker : AllStatic {
public:
enum ShutdownReason {
NMT_shutdown_none, // no shutdown requested
NMT_shutdown_user, // user requested shutdown
NMT_normal, // normal shutdown, process exit
NMT_out_of_memory, // shutdown due to out of memory
NMT_initialization, // shutdown due to initialization failure
NMT_use_malloc_only, // can not combine NMT with UseMallocOnly flag
NMT_error_reporting, // shutdown by vmError::report_and_die()
NMT_out_of_generation, // running out of generation queue
NMT_sequence_overflow // overflow the sequence number
};
public:
static inline void init_tracking_options(const char* option_line) { }
static inline bool is_on() { return false; }
static const char* reason() { return "Native memory tracking is not implemented"; }
static inline bool can_walk_stack() { return false; }
static inline void bootstrap_single_thread() { }
static inline void bootstrap_multi_thread() { }
static inline void start() { }
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) { }
static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
static inline void record_realloc(address old_addr, address new_addr, size_t size,
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_arena_size(address addr, size_t size) { }
static inline void record_virtual_memory_reserve(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_uncommit(address addr, size_t size,
Thread* thread = NULL) { }
static inline void record_virtual_memory_release(address addr, size_t size,
Thread* thread = NULL) { }
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) { }
static inline bool baseline() { return false; }
static inline bool has_baseline() { return false; }
static void shutdown(ShutdownReason reason) { }
static inline bool shutdown_in_progress() { return false; }
static bool print_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true) { return false; }
static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
bool summary_only = true) { return false; }
static inline void sync() { }
static inline void thread_exiting(JavaThread* thread) { }
};
#else // !INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
@ -411,4 +485,6 @@ class MemTracker : AllStatic {
static enum ShutdownReason _reason;
};
#endif // !INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
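The block above is the other half of the conditional-inclusion scheme: when INCLUDE_NMT is 0, memTracker.hpp supplies a stub MemTracker whose methods are empty inline bodies, so call sites compile unchanged and the calls fold away. A self-contained sketch of that technique with a made-up Tracker class (not the HotSpot one):

#include <cstddef>
#include <cstdio>

#ifndef INCLUDE_TRACKER
#define INCLUDE_TRACKER 0        // 1 selects the "real" implementation below
#endif

#if INCLUDE_TRACKER
class Tracker {
 public:
  static void record_malloc(void* addr, size_t size) {
    std::printf("tracked %zu bytes at %p\n", size, addr);   // real bookkeeping
  }
};
#else
class Tracker {
 public:
  static inline void record_malloc(void*, size_t) { }       // compiles to nothing
};
#endif

void* traced_alloc(size_t size) {
  void* p = ::operator new(size);
  Tracker::record_malloc(p, size);   // same call site in both configurations
  return p;
}

int main() {
  ::operator delete(traced_alloc(16));
  return 0;
}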

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,7 @@ HS_DTRACE_PROBE_DECL(hs_private, safepoint__begin);
HS_DTRACE_PROBE_DECL(hs_private, safepoint__end);
#endif /* !USDT2 */
#if INCLUDE_MANAGEMENT
TimeStamp RuntimeService::_app_timer;
TimeStamp RuntimeService::_safepoint_timer;
PerfCounter* RuntimeService::_sync_time_ticks = NULL;
@ -101,9 +102,9 @@ void RuntimeService::init() {
memset((void*) capabilities, '0', len);
capabilities[len-1] = '\0';
capabilities[0] = AttachListener::is_attach_supported() ? '1' : '0';
#ifdef KERNEL
#if INCLUDE_SERVICES
capabilities[1] = '1';
#endif // KERNEL
#endif // INCLUDE_SERVICES
PerfDataManager::create_string_constant(SUN_RT, "jvmCapabilities",
capabilities, CHECK);
}
@ -205,3 +206,5 @@ void RuntimeService::record_thread_interrupt_signaled_count() {
_thread_interrupt_signaled_count->inc();
}
}
#endif // INCLUDE_MANAGEMENT

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,15 +53,15 @@ public:
static double last_application_time_sec() { return _app_timer.seconds(); }
// callbacks
static void record_safepoint_begin();
static void record_safepoint_synchronized();
static void record_safepoint_end();
static void record_application_start();
static void record_safepoint_begin() NOT_MANAGEMENT_RETURN;
static void record_safepoint_synchronized() NOT_MANAGEMENT_RETURN;
static void record_safepoint_end() NOT_MANAGEMENT_RETURN;
static void record_application_start() NOT_MANAGEMENT_RETURN;
// interruption events
static void record_interrupted_before_count();
static void record_interrupted_during_count();
static void record_thread_interrupt_signaled_count();
static void record_interrupted_before_count() NOT_MANAGEMENT_RETURN;
static void record_interrupted_during_count() NOT_MANAGEMENT_RETURN;
static void record_thread_interrupt_signaled_count() NOT_MANAGEMENT_RETURN;
};
#endif // SHARE_VM_SERVICES_RUNTIMESERVICE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,26 +34,131 @@
// Makes a string of the macro expansion of a
#define XSTR(a) STR(a)
// KERNEL variant
#ifdef KERNEL
#define COMPILER1
#define SERIALGC
// -DINCLUDE_<something>=0 | 1 can be specified on the command line to include
// or exclude functionality.
#define JVMTI_KERNEL
#define FPROF_KERNEL
#define VM_STRUCTS_KERNEL
#define JNICHECK_KERNEL
#define SERVICES_KERNEL
#ifndef INCLUDE_JVMTI
#define INCLUDE_JVMTI 1
#endif // INCLUDE_JVMTI
#define KERNEL_RETURN {}
#define KERNEL_RETURN_(code) { return code; }
#if INCLUDE_JVMTI
#define JVMTI_ONLY(x) x
#define NOT_JVMTI(x)
#define NOT_JVMTI_RETURN
#define NOT_JVMTI_RETURN_(code) /* next token must be ; */
#else
#define JVMTI_ONLY(x)
#define NOT_JVMTI(x) x
#define NOT_JVMTI_RETURN { return; }
#define NOT_JVMTI_RETURN_(code) { return code; }
#endif // INCLUDE_JVMTI
#else // KERNEL
#ifndef INCLUDE_FPROF
#define INCLUDE_FPROF 1
#endif
#define KERNEL_RETURN /* next token must be ; */
#define KERNEL_RETURN_(code) /* next token must be ; */
#if INCLUDE_FPROF
#define NOT_FPROF_RETURN /* next token must be ; */
#define NOT_FPROF_RETURN_(code) /* next token must be ; */
#else
#define NOT_FPROF_RETURN {}
#define NOT_FPROF_RETURN_(code) { return code; }
#endif // INCLUDE_FPROF
#endif // KERNEL
#ifndef INCLUDE_VM_STRUCTS
#define INCLUDE_VM_STRUCTS 1
#endif
#if INCLUDE_VM_STRUCTS
#define NOT_VM_STRUCTS_RETURN /* next token must be ; */
#define NOT_VM_STRUCTS_RETURN_(code) /* next token must be ; */
#else
#define NOT_VM_STRUCTS_RETURN {}
#define NOT_VM_STRUCTS_RETURN_(code) { return code; }
#endif // INCLUDE_VM_STRUCTS
#ifndef INCLUDE_JNI_CHECK
#define INCLUDE_JNI_CHECK 1
#endif
#if INCLUDE_JNI_CHECK
#define NOT_JNI_CHECK_RETURN /* next token must be ; */
#define NOT_JNI_CHECK_RETURN_(code) /* next token must be ; */
#else
#define NOT_JNI_CHECK_RETURN {}
#define NOT_JNI_CHECK_RETURN_(code) { return code; }
#endif // INCLUDE_JNI_CHECK
#ifndef INCLUDE_SERVICES
#define INCLUDE_SERVICES 1
#endif
#if INCLUDE_SERVICES
#define NOT_SERVICES_RETURN /* next token must be ; */
#define NOT_SERVICES_RETURN_(code) /* next token must be ; */
#else
#define NOT_SERVICES_RETURN {}
#define NOT_SERVICES_RETURN_(code) { return code; }
#endif // INCLUDE_SERVICES
#ifndef INCLUDE_CDS
#define INCLUDE_CDS 1
#endif
#if INCLUDE_CDS
#define CDS_ONLY(x) x
#define NOT_CDS(x)
#define NOT_CDS_RETURN /* next token must be ; */
#define NOT_CDS_RETURN_(code) /* next token must be ; */
#else
#define CDS_ONLY(x)
#define NOT_CDS(x) x
#define NOT_CDS_RETURN {}
#define NOT_CDS_RETURN_(code) { return code; }
#endif // INCLUDE_CDS
#ifndef INCLUDE_MANAGEMENT
#define INCLUDE_MANAGEMENT 1
#endif // INCLUDE_MANAGEMENT
#if INCLUDE_MANAGEMENT
#define NOT_MANAGEMENT_RETURN /* next token must be ; */
#define NOT_MANAGEMENT_RETURN_(code) /* next token must be ; */
#else
#define NOT_MANAGEMENT_RETURN {}
#define NOT_MANAGEMENT_RETURN_(code) { return code; }
#endif // INCLUDE_MANAGEMENT
/*
* When INCLUDE_ALTERNATE_GCS is false the only garbage collectors
* included in the JVM are defaultNewGeneration and markCompact.
*
* When INCLUDE_ALTERNATE_GCS is true all garbage collectors are
* included in the JVM.
*/
#ifndef INCLUDE_ALTERNATE_GCS
#define INCLUDE_ALTERNATE_GCS 1
#endif // INCLUDE_ALTERNATE_GCS
#if INCLUDE_ALTERNATE_GCS
#define NOT_ALTERNATE_GCS_RETURN /* next token must be ; */
#define NOT_ALTERNATE_GCS_RETURN_(code) /* next token must be ; */
#else
#define NOT_ALTERNATE_GCS_RETURN {}
#define NOT_ALTERNATE_GCS_RETURN_(code) { return code; }
#endif // INCLUDE_ALTERNATE_GCS
#ifndef INCLUDE_NMT
#define INCLUDE_NMT 1
#endif // INCLUDE_NMT
#if INCLUDE_NMT
#define NOT_NMT_RETURN /* next token must be ; */
#define NOT_NMT_RETURN_(code) /* next token must be ; */
#else
#define NOT_NMT_RETURN {}
#define NOT_NMT_RETURN_(code) { return code; }
#endif // INCLUDE_NMT
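Taken together, the flags above follow one template: each INCLUDE_<X> defaults to 1, can be overridden with -DINCLUDE_<X>=0 on the compiler command line, and comes with NOT_<X>_RETURN / NOT_<X>_RETURN_(code) macros that turn a declaration into an empty inline stub when the feature is excluded. A minimal sketch (not HotSpot code) of how a header would use such a pair; Feature, do_work, query and INCLUDE_FEATURE are placeholder names invented for the illustration.

#include <cstdio>

#ifndef INCLUDE_FEATURE
#define INCLUDE_FEATURE 1                 // override with -DINCLUDE_FEATURE=0
#endif

#if INCLUDE_FEATURE
#define NOT_FEATURE_RETURN                /* next token must be ; */
#define NOT_FEATURE_RETURN_(code)         /* next token must be ; */
#else
#define NOT_FEATURE_RETURN                {}
#define NOT_FEATURE_RETURN_(code)         { return code; }
#endif

class Feature {
 public:
  // With INCLUDE_FEATURE=1 these are plain declarations (defined out of line
  // below); with -DINCLUDE_FEATURE=0 they become empty inline bodies here.
  static void do_work() NOT_FEATURE_RETURN;
  static int  query()   NOT_FEATURE_RETURN_(0);
};

#if INCLUDE_FEATURE
// Definitions for the included configuration.
void Feature::do_work() { std::printf("feature is doing work\n"); }
int  Feature::query()   { return 42; }
#endif

int main() {
  Feature::do_work();                     // same call site in both configurations
  std::printf("%d\n", Feature::query());  // 42 when included, 0 when stubbed out
  return 0;
}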
// COMPILER1 variant
#ifdef COMPILER1