Alejandro Murillo 2016-07-29 16:11:55 -07:00
commit f94e0afeb7
212 changed files with 3555 additions and 3723 deletions


@ -51,6 +51,9 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLC_CFLAGS_WARNINGS := -W3 -D_CRT_SECURE_NO_WARNINGS
endif
# Set the C++ standard if supported
ADLC_CFLAGS += $(CXXSTD_CXXFLAG)
# NOTE: The old build didn't set -DASSERT for windows but it doesn't seem to
# hurt.
ADLC_CFLAGS += -DASSERT
@ -153,10 +156,10 @@ ifeq ($(call check-jvm-feature, compiler2), true)
$(call MakeDir, $(@D))
$(call ExecuteWithLog, $(ADLC_SUPPORT_DIR)/adlc_run, \
$(FIXPATH) $(ADLC_TOOL) $(ADLCFLAGS) $(SINGLE_AD_SRCFILE) \
-c$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU).cpp \
-h$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU).hpp \
-a$(ADLC_SUPPORT_DIR)/dfa_$(HOTSPOT_TARGET_CPU).cpp \
-v$(ADLC_SUPPORT_DIR)/adGlobals_$(HOTSPOT_TARGET_CPU).hpp)
-c$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
-h$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
-a$(ADLC_SUPPORT_DIR)/dfa_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
-v$(ADLC_SUPPORT_DIR)/adGlobals_$(HOTSPOT_TARGET_CPU_ARCH).hpp)
$(TOUCH) $@
##############################################################################
@ -164,17 +167,17 @@ ifeq ($(call check-jvm-feature, compiler2), true)
# and postprocess them by fixing dummy #line directives.
ADLC_GENERATED_FILES := $(addprefix $(JVM_VARIANT_OUTPUTDIR)/gensrc/adfiles/, \
ad_$(HOTSPOT_TARGET_CPU).cpp \
ad_$(HOTSPOT_TARGET_CPU).hpp \
ad_$(HOTSPOT_TARGET_CPU)_clone.cpp \
ad_$(HOTSPOT_TARGET_CPU)_expand.cpp \
ad_$(HOTSPOT_TARGET_CPU)_format.cpp \
ad_$(HOTSPOT_TARGET_CPU)_gen.cpp \
ad_$(HOTSPOT_TARGET_CPU)_misc.cpp \
ad_$(HOTSPOT_TARGET_CPU)_peephole.cpp \
ad_$(HOTSPOT_TARGET_CPU)_pipeline.cpp \
adGlobals_$(HOTSPOT_TARGET_CPU).hpp \
dfa_$(HOTSPOT_TARGET_CPU).cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_clone.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_expand.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_format.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_gen.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_misc.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_peephole.cpp \
ad_$(HOTSPOT_TARGET_CPU_ARCH)_pipeline.cpp \
adGlobals_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
dfa_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
)
$(JVM_VARIANT_OUTPUTDIR)/gensrc/adfiles/%: $(ADLC_RUN_MARKER)
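For context on the rename: HOTSPOT_TARGET_CPU and HOTSPOT_TARGET_CPU_ARCH differ exactly where 32- and 64-bit variants of an architecture used to carry separate ad files. Assuming the usual values, a 64-bit x86 build has HOTSPOT_TARGET_CPU = x86_64 but HOTSPOT_TARGET_CPU_ARCH = x86, so the generated sources move from ad_x86_64.cpp to ad_x86.cpp; likewise ppc64 collapses to ad_ppc.cpp, which is why the ppc includes further down drop their _64 suffix.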


@ -104,7 +104,7 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LAUNCHER, \
-I$(GTEST_FRAMEWORK_SRC)/include, \
CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
LDFLAGS := $(LDFLAGS_TESTEXE), \
LDFLAGS := $(LDFLAGS_JDKEXE), \
LDFLAGS_unix := -L$(JVM_OUTPUTDIR)/gtest $(call SET_SHARED_LIBRARY_ORIGIN), \
LDFLAGS_solaris := -library=stlport4, \
LIBS_unix := -ljvm, \


@ -60,12 +60,15 @@ JVM_CFLAGS_INCLUDES += \
-I$(HOTSPOT_TOPDIR)/src/share/vm/prims \
#
# INCLUDE_SUFFIX_* is only meant for including the proper
# platform files. Don't use it to guard code. Use the value of
# HOTSPOT_TARGET_CPU_DEFINE etc. instead.
# Remaining TARGET_ARCH_* is needed to distinguish closed and open
# 64-bit ARM ports (also called AARCH64).
JVM_CFLAGS_TARGET_DEFINES += \
-DTARGET_OS_FAMILY_$(HOTSPOT_TARGET_OS) \
-DTARGET_ARCH_MODEL_$(HOTSPOT_TARGET_CPU) \
-DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
-DTARGET_OS_ARCH_MODEL_$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU) \
-DTARGET_OS_ARCH_$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH) \
-DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
-DINCLUDE_SUFFIX_CPU=_$(HOTSPOT_TARGET_CPU_ARCH) \
-DTARGET_COMPILER_$(HOTSPOT_TOOLCHAIN_TYPE) \
-D$(HOTSPOT_TARGET_CPU_DEFINE) \
-DHOTSPOT_LIB_ARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' \
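These INCLUDE_SUFFIX_* defines feed the CPU_HEADER/OS_HEADER macro family in utilities/macros.hpp, which the source hunks below use to replace the long #ifdef TARGET_ARCH_* include cascades. A simplified sketch of the mechanism (the macro names match macros.hpp; the exact definitions there may differ slightly):

    // Token-paste the basename with the platform suffix, then stringize the
    // result so it can be used directly in an #include directive.
    #define PASTE_TOKENS(x, y) PASTE_TOKENS_AUX(x, y)
    #define PASTE_TOKENS_AUX(x, y) x ## y
    #define STR(s)  #s
    #define XSTR(s) STR(s)

    // With -DINCLUDE_SUFFIX_CPU=_x86:
    //   CPU_HEADER(assembler)        -> "assembler_x86.hpp"
    //   CPU_HEADER_INLINE(assembler) -> "assembler_x86.inline.hpp"
    #define CPU_HEADER_STEM(basename)   PASTE_TOKENS(basename, INCLUDE_SUFFIX_CPU)
    #define CPU_HEADER(basename)        XSTR(CPU_HEADER_STEM(basename).hpp)
    #define CPU_HEADER_INLINE(basename) XSTR(CPU_HEADER_STEM(basename).inline.hpp)

OS_HEADER, OS_HEADER_INLINE and OS_CPU_HEADER_INLINE follow the same pattern using INCLUDE_SUFFIX_OS (or both suffixes combined), which is what single lines like #include OS_CPU_HEADER_INLINE(bytes) in the hunks below rely on.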


@ -53,6 +53,7 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC := \
$(HOTSPOT_TOPDIR)/test/compiler/native \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
$(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetModulesInfo \
#
# Add conditional directories here when needed.
@ -62,12 +63,26 @@ BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(HOTSPOT_TOPDIR)/test/runtime/ThreadSignalMask
endif
ifeq ($(OPENJDK_TARGET_OS), linux)
BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
$(HOTSPOT_TOPDIR)/test/runtime/execstack \
$(HOTSPOT_TOPDIR)/test/runtime/jsig \
$(HOTSPOT_TOPDIR)/test/runtime/StackGuardPages
endif
ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
endif
ifeq ($(OPENJDK_TARGET_OS), linux)
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rw := -z noexecstack
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
BUILD_HOTSPOT_JTREG_EXECUTABLES_LDFLAGS_exeinvoke := -ljvm -lpthread
BUILD_TEST_invoke_exeinvoke.c_OPTIMIZATION := NONE
endif
BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native
BUILD_HOTSPOT_JTREG_IMAGE_DIR := $(TEST_IMAGE_DIR)/hotspot/jtreg


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -67,9 +67,6 @@ class Bytes: AllStatic {
// The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
#ifdef TARGET_OS_ARCH_linux_aarch64
# include "bytes_linux_aarch64.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(bytes)
#endif // CPU_AARCH64_VM_BYTES_AARCH64_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,9 +29,7 @@
// Inline functions for memory copy and fill.
// Contains inline asm implementations
#ifdef TARGET_OS_ARCH_linux_aarch64
# include "copy_linux_aarch64.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(copy)
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,10 +29,10 @@
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/macros.hpp"
#include "vm_version_aarch64.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#include OS_HEADER_INLINE(os)
#ifndef BUILTIN_SIM
#include <sys/auxv.h>


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -274,8 +274,6 @@ class Bytes: AllStatic {
#endif // VM_LITTLE_ENDIAN
};
#if defined(TARGET_OS_ARCH_linux_ppc)
#include "bytes_linux_ppc.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(bytes)
#endif // CPU_PPC_VM_BYTES_PPC_HPP


@ -47,7 +47,7 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
// The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 128
#if defined(COMPILER2) && (defined(AIX) || defined(linux))
#if defined(COMPILER2) && (defined(AIX) || defined(LINUX))
// Include Transactional Memory lock eliding optimization
#define INCLUDE_RTM_OPT 1
#endif
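A plausible reason for the linux -> LINUX switch (an inference, not stated in the patch): lowercase linux is a GNU preprocessor extension that disappears once the compiler runs in a strict ISO mode, and this changeset starts passing an explicit C++ standard flag (the CXXSTD_CXXFLAG addition in the first hunk). The uppercase LINUX macro is supplied by the JVM makefiles themselves, so it works regardless of -std:

    // GNU (non-strict) modes predefine: linux, unix, ...
    // Strict ISO modes (-std=c++NN) keep only reserved names such as __linux__.
    #if defined(LINUX)   // supplied via -DLINUX by the build, independent of -std
      // Linux-only code path
    #endif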


@ -23,8 +23,8 @@
*
*/
#ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
#define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
#ifndef CPU_PPC_VM_INTERP_MASM_PPC_HPP
#define CPU_PPC_VM_INTERP_MASM_PPC_HPP
#include "asm/macroAssembler.hpp"
#include "interpreter/invocationCounter.hpp"
@ -263,4 +263,4 @@ class InterpreterMacroAssembler: public MacroAssembler {
NotifyMethodExitMode mode, bool check_exceptions);
};
#endif // CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
#endif // CPU_PPC_VM_INTERP_MASM_PPC_HPP


@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interp_masm_ppc.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/sharedRuntime.hpp"


@ -1,24 +0,0 @@
//
// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2013 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//


@ -40,7 +40,7 @@
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "adfiles/ad_ppc_64.hpp"
#include "opto/ad.hpp"
#include "opto/runtime.hpp"
#endif


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,8 +23,8 @@
*
*/
#ifndef CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
#define CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
#ifndef CPU_PPC_VM_STUBROUTINES_PPC_HPP
#define CPU_PPC_VM_STUBROUTINES_PPC_HPP
// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
@ -61,4 +61,4 @@ class ppc64 {
};
#endif // CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
#endif // CPU_PPC_VM_STUBROUTINES_PPC_HPP


@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014 SAP SE. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,8 +23,8 @@
*
*/
#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
#define CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_HPP
#define CPU_PPC_VM_TEMPLATETABLE_PPC_HPP
static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
@ -35,4 +35,4 @@
static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);
#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_HPP


@ -278,7 +278,7 @@ void VM_Version::initialize() {
os_too_old = false;
}
#endif
#ifdef linux
#ifdef LINUX
// At least Linux kernel 4.2, as the problematic behavior of syscalls
// being called in the middle of a transaction has been addressed.
// Please, refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894


@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* Copyright (c) 2012, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#define CPU_X86_VM_BYTES_X86_HPP
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"
class Bytes: AllStatic {
private:
@ -70,20 +71,7 @@ class Bytes: AllStatic {
static inline u8 swap_u8(u8 x);
};
// The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
#ifdef TARGET_OS_ARCH_linux_x86
# include "bytes_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "bytes_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "bytes_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "bytes_bsd_x86.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(bytes)
#endif // CPU_X86_VM_BYTES_X86_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,19 +28,7 @@
// Inline functions for memory copy and fill.
// Contains inline asm implementations
#ifdef TARGET_OS_ARCH_linux_x86
# include "copy_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "copy_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "copy_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "copy_bsd_x86.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(copy)
static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef AMD64


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#endif
#endif
#if defined(COMPILER2) && !defined(JAVASE_EMBEDDED)
#if defined(COMPILER2)
// Include Restricted Transactional Memory lock eliding optimization
#define INCLUDE_RTM_OPT 1
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,9 @@
*
*/
#include "precompiled.hpp"
#include "mutex_bsd.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifndef CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP
// put OS-includes here
# include <signal.h>
// Still empty.
#endif // CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP


@ -26,9 +26,7 @@
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "register_x86.hpp"
#ifdef TARGET_ARCH_x86
# include "interp_masm_x86.hpp"
#endif
#include "interp_masm_x86.hpp"
REGISTER_DEFINITION(Register, noreg);
REGISTER_DEFINITION(Register, rax);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,83 @@
// definition. See stubRoutines.hpp for a description on how to
// extend it.
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
enum platform_dependent_constants {
code_size1 = 20000 LP64_ONLY(+10000), // simply increase if too small (assembler will crash if too small)
code_size2 = 33800 LP64_ONLY(+1200) // simply increase if too small (assembler will crash if too small)
};
class x86 {
friend class StubGenerator;
friend class VMStructs;
#ifdef _LP64
private:
static address _get_previous_fp_entry;
static address _get_previous_sp_entry;
static address _f2i_fixup;
static address _f2l_fixup;
static address _d2i_fixup;
static address _d2l_fixup;
static address _float_sign_mask;
static address _float_sign_flip;
static address _double_sign_mask;
static address _double_sign_flip;
public:
static address get_previous_fp_entry() {
return _get_previous_fp_entry;
}
static address get_previous_sp_entry() {
return _get_previous_sp_entry;
}
static address f2i_fixup() {
return _f2i_fixup;
}
static address f2l_fixup() {
return _f2l_fixup;
}
static address d2i_fixup() {
return _d2i_fixup;
}
static address d2l_fixup() {
return _d2l_fixup;
}
static address float_sign_mask() {
return _float_sign_mask;
}
static address float_sign_flip() {
return _float_sign_flip;
}
static address double_sign_mask() {
return _double_sign_mask;
}
static address double_sign_flip() {
return _double_sign_flip;
}
#else // !LP64
private:
static address _verify_fpu_cntrl_wrd_entry;
public:
static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
#endif // !LP64
private:
static address _verify_mxcsr_entry;
// shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
@ -138,4 +215,6 @@
static address _Pi4x4_addr() { return _Pi4x4_adr; }
static address _ones_addr() { return _ones_adr; }
#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
};
#endif // CPU_X86_VM_STUBROUTINES_X86_HPP
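The LP64_ONLY wrapper in platform_dependent_constants above comes from utilities/globalDefinitions.hpp; in essence:

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

So the merged header reproduces both deleted files' values: code_size1 = 20000 LP64_ONLY(+10000) yields 30000 on 64-bit and 20000 on 32-bit, and code_size2 = 33800 LP64_ONLY(+1200) yields 35000 and 33800, matching the constants in the stubRoutines_x86_64.hpp and stubRoutines_x86_32.hpp files removed below.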


@ -1,53 +0,0 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_VM_STUBROUTINES_X86_32_HPP
#define CPU_X86_VM_STUBROUTINES_X86_32_HPP
// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
// extend it.
enum platform_dependent_constants {
code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
code_size2 = 33800 // simply increase if too small (assembler will crash if too small)
};
class x86 {
friend class StubGenerator;
friend class VMStructs;
private:
static address _verify_fpu_cntrl_wrd_entry;
public:
static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
# include "stubRoutines_x86.hpp"
};
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP


@ -1,112 +0,0 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef CPU_X86_VM_STUBROUTINES_X86_64_HPP
#define CPU_X86_VM_STUBROUTINES_X86_64_HPP
// This file holds the platform specific parts of the StubRoutines
// definition. See stubRoutines.hpp for a description on how to
// extend it.
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
enum platform_dependent_constants {
code_size1 = 30000, // simply increase if too small (assembler will crash if too small)
code_size2 = 35000 // simply increase if too small (assembler will crash if too small)
};
class x86 {
friend class StubGenerator;
private:
static address _get_previous_fp_entry;
static address _get_previous_sp_entry;
static address _f2i_fixup;
static address _f2l_fixup;
static address _d2i_fixup;
static address _d2l_fixup;
static address _float_sign_mask;
static address _float_sign_flip;
static address _double_sign_mask;
static address _double_sign_flip;
public:
static address get_previous_fp_entry()
{
return _get_previous_fp_entry;
}
static address get_previous_sp_entry()
{
return _get_previous_sp_entry;
}
static address f2i_fixup()
{
return _f2i_fixup;
}
static address f2l_fixup()
{
return _f2l_fixup;
}
static address d2i_fixup()
{
return _d2i_fixup;
}
static address d2l_fixup()
{
return _d2l_fixup;
}
static address float_sign_mask()
{
return _float_sign_mask;
}
static address float_sign_flip()
{
return _float_sign_flip;
}
static address double_sign_mask()
{
return _double_sign_mask;
}
static address double_sign_flip()
{
return _double_sign_flip;
}
# include "stubRoutines_x86.hpp"
};
#endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -165,12 +165,8 @@ class Bytes: AllStatic {
#ifdef VM_LITTLE_ENDIAN
// The following header contains the implementations of swap_u2,
// swap_u4, and swap_u8
#ifdef TARGET_OS_ARCH_linux_zero
# include "bytes_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "bytes_bsd_zero.inline.hpp"
#endif
#include OS_CPU_HEADER_INLINE(bytes)
#endif // VM_LITTLE_ENDIAN


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,9 @@
*
*/
#include "precompiled.hpp"
#include "mutex_linux.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifndef CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP
#define CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP
// put OS-includes here
# include <signal.h>
// Needed for includes in shared files.
#endif // CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,9 @@
*
*/
#include "precompiled.hpp"
#include "mutex_windows.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifndef CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP
#define CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP
// put OS-includes here
# include <windows.h>
// Needed for includes in shared files.
#endif // CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP


@ -242,7 +242,15 @@ public class InstanceKlass extends Klass {
}
public long getSize() {
return alignSize(getHeaderSize() + getVtableLen() + getItableLen() + getNonstaticOopMapSize());
long wordLength = VM.getVM().getBytesPerWord();
long size = getHeaderSize() +
(getVtableLen() +
getItableLen() +
getNonstaticOopMapSize()) * wordLength;
if (isInterface()) {
size += wordLength;
}
return alignSize(size);
}
public static long getHeaderSize() { return headerSize; }
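To make the unit fix concrete (hypothetical numbers, not from the patch): on a 64-bit VM wordLength is 8, so a class with a 440-byte header, vtableLen 5, itableLen 2 and nonstaticOopMapSize 1 (the last three counted in words) now reports 440 + (5 + 2 + 1) * 8 = 504 bytes, plus one extra word (8 bytes) when the klass is an interface, before alignment. The old expression added word counts directly to a byte count, systematically undersizing the klass.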


@ -1,33 +0,0 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_AIX_VM_MUTEX_AIX_INLINE_HPP
#define OS_AIX_VM_MUTEX_AIX_INLINE_HPP
#include "os_aix.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/thread.inline.hpp"
#endif // OS_AIX_VM_MUTEX_AIX_INLINE_HPP


@ -44,7 +44,6 @@
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"


@ -1,37 +0,0 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_BSD_VM_MUTEX_BSD_INLINE_HPP
#define OS_BSD_VM_MUTEX_BSD_INLINE_HPP
#include "os_bsd.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/thread.inline.hpp"
// Reconciliation History
// mutex_solaris.inline.hpp 1.5 99/06/22 16:38:49
// End
#endif // OS_BSD_VM_MUTEX_BSD_INLINE_HPP


@ -35,7 +35,6 @@
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_bsd.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_bsd.inline.hpp"
#include "os_share_bsd.hpp"


@ -35,7 +35,6 @@
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_linux.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_share_linux.hpp"


@ -28,6 +28,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#include <signal.h>
@ -214,7 +215,7 @@ void os::Posix::print_rlimit_info(outputStream* st) {
else st->print("%luk", rlim.rlim_cur >> 10);
// Isn't there on solaris
#if !defined(TARGET_OS_FAMILY_solaris) && !defined(TARGET_OS_FAMILY_aix)
#if !defined(SOLARIS) && !defined(AIX)
st->print(", NPROC ");
getrlimit(RLIMIT_NPROC, &rlim);
if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
@ -1062,28 +1063,28 @@ int os::Posix::unblock_thread_signal_mask(const sigset_t *set) {
}
address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
#ifdef TARGET_OS_FAMILY_linux
return Linux::ucontext_get_pc(ctx);
#elif defined(TARGET_OS_FAMILY_solaris)
return Solaris::ucontext_get_pc(ctx);
#elif defined(TARGET_OS_FAMILY_aix)
#if defined(AIX)
return Aix::ucontext_get_pc(ctx);
#elif defined(TARGET_OS_FAMILY_bsd)
#elif defined(BSD)
return Bsd::ucontext_get_pc(ctx);
#elif defined(LINUX)
return Linux::ucontext_get_pc(ctx);
#elif defined(SOLARIS)
return Solaris::ucontext_get_pc(ctx);
#else
VMError::report_and_die("unimplemented ucontext_get_pc");
#endif
}
void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
#ifdef TARGET_OS_FAMILY_linux
Linux::ucontext_set_pc(ctx, pc);
#elif defined(TARGET_OS_FAMILY_solaris)
Solaris::ucontext_set_pc(ctx, pc);
#elif defined(TARGET_OS_FAMILY_aix)
#if defined(AIX)
Aix::ucontext_set_pc(ctx, pc);
#elif defined(TARGET_OS_FAMILY_bsd)
#elif defined(BSD)
Bsd::ucontext_set_pc(ctx, pc);
#elif defined(LINUX)
Linux::ucontext_set_pc(ctx, pc);
#elif defined(SOLARIS)
Solaris::ucontext_set_pc(ctx, pc);
#else
VMError::report_and_die("unimplemented ucontext_get_pc");
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,17 +33,17 @@
#include <sys/wait.h>
#include <signal.h>
#ifdef TARGET_OS_FAMILY_linux
#ifdef LINUX
#include <sys/syscall.h>
#include <unistd.h>
#endif
#ifdef TARGET_OS_FAMILY_solaris
#ifdef SOLARIS
#include <thread.h>
#endif
#ifdef TARGET_OS_FAMILY_aix
#ifdef AIX
#include <unistd.h>
#endif
#ifdef TARGET_OS_FAMILY_bsd
#ifdef BSD
#include <sys/syscall.h>
#include <unistd.h>
#endif


@ -1,36 +0,0 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "mutex_solaris.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
// Solaris-specific include, therefore not in includeDB_*
# include "os_share_solaris.hpp"
// put OS-includes here
# include <signal.h>


@ -35,7 +35,6 @@
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_solaris.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_solaris.hpp"
#include "os_solaris.inline.hpp"
@ -79,7 +78,6 @@
# include <link.h>
# include <poll.h>
# include <pthread.h>
# include <pwd.h>
# include <schedctl.h>
# include <setjmp.h>
# include <signal.h>


@ -34,16 +34,18 @@
#include "utilities/exceptions.hpp"
// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <errno.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/stat.h>
# include <signal.h>
# include <pwd.h>
# include <procfs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <signal.h>
#include <procfs.h>
/* For POSIX-compliant getpwuid_r on Solaris */
#define _POSIX_PTHREAD_SEMANTICS
#include <pwd.h>
static char* backing_store_file_name = NULL; // name of the backing store
// file, if successfully created.
@ -453,12 +455,8 @@ static char* get_user_name(uid_t uid) {
char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
#ifdef _GNU_SOURCE
struct passwd* p = NULL;
int result = getpwuid_r(uid, &pwent, pwbuf, (size_t)bufsize, &p);
#else // _GNU_SOURCE
struct passwd* p = getpwuid_r(uid, &pwent, pwbuf, (int)bufsize);
#endif // _GNU_SOURCE
if (p == NULL || p->pw_name == NULL || *(p->pw_name) == '\0') {
if (PrintMiscellaneous && Verbose) {
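The _POSIX_PTHREAD_SEMANTICS define above exists because Solaris historically ships two getpwuid_r variants: the legacy one returns struct passwd* directly, while the POSIX one returns an int and hands the entry back through an output parameter. Defining the macro before <pwd.h> selects the POSIX signature, which is what allows the old #ifdef _GNU_SOURCE split to be deleted. A minimal, self-contained usage sketch of the POSIX variant (standard API, not code from the patch):

    #define _POSIX_PTHREAD_SEMANTICS  // must precede <pwd.h> on Solaris
    #include <pwd.h>
    #include <stdio.h>
    #include <unistd.h>

    int main() {
      struct passwd pwent;
      struct passwd* p = NULL;
      char buf[1024];
      // POSIX form: returns 0 on success and delivers the entry via 'p'.
      if (getpwuid_r(getuid(), &pwent, buf, sizeof(buf), &p) == 0 && p != NULL) {
        printf("user: %s\n", p->pw_name);
      }
      return 0;
    }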


@ -1,32 +0,0 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_WINDOWS_VM_MUTEX_WINDOWS_INLINE_HPP
#define OS_WINDOWS_VM_MUTEX_WINDOWS_INLINE_HPP
#include "os_windows.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/thread.inline.hpp"
#endif // OS_WINDOWS_VM_MUTEX_WINDOWS_INLINE_HPP


@ -38,7 +38,6 @@
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_windows.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
@ -3898,6 +3897,13 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
DWORD res;
HANDLE hproc, hthr;
// We only attempt to register threads until a process exiting
// thread manages to set the process_exiting flag. Any threads
// that come through here after the process_exiting flag is set
// are unregistered and will be caught in the SuspendThread()
// infinite loop below.
bool registered = false;
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
@ -3957,12 +3963,21 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
0, FALSE, DUPLICATE_SAME_ACCESS)) {
warning("DuplicateHandle failed (%u) in %s: %d\n",
GetLastError(), __FILE__, __LINE__);
// We can't register this thread (no more handles) so this thread
// may be racing with a thread that is calling exit(). If the thread
// that is calling exit() has managed to set the process_exiting
// flag, then this thread will be caught in the SuspendThread()
// infinite loop below which closes that race. A small timing
// window remains before the process_exiting flag is set, but it
// is only exposed when we are out of handles.
} else {
++handle_count;
}
registered = true;
// The current exiting thread has stored its handle in the array, and now
// should leave the critical section before calling _endthreadex().
}
} else if (what != EPT_THREAD && handle_count > 0) {
jlong start_time, finish_time, timeout_left;
@ -4012,10 +4027,11 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
LeaveCriticalSection(&crit_sect);
}
if (OrderAccess::load_acquire(&process_exiting) != 0 &&
if (!registered &&
OrderAccess::load_acquire(&process_exiting) != 0 &&
process_exiting != (jint)GetCurrentThreadId()) {
// Some other thread is about to call exit(), so we
// don't let the current thread proceed to exit() or _endthreadex()
// Some other thread is about to call exit(), so we don't let
// the current unregistered thread proceed to exit() or _endthreadex()
while (true) {
SuspendThread(GetCurrentThread());
// Avoid busy-wait loop, if SuspendThread() failed.
@ -4027,7 +4043,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// We are here if either
// - there's no 'race at exit' bug on this OS release;
// - initialization of the critical section failed (unlikely);
// - the current thread has stored its handle and left the critical section;
// - the current thread has registered itself and left the critical section;
// - the process-exiting thread has raised the flag and left the critical section.
if (what == EPT_THREAD) {
_endthreadex((unsigned)exit_code);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,11 +22,11 @@
*
*/
#ifndef OS_SOLARIS_VM_MUTEX_SOLARIS_INLINE_HPP
#define OS_SOLARIS_VM_MUTEX_SOLARIS_INLINE_HPP
#ifndef OS_CPU_AIX_PPC_VM_BYTES_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_PPC_VM_BYTES_AIX_PPC_INLINE_HPP
#include "os_solaris.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/thread.inline.hpp"
#if defined(VM_LITTLE_ENDIAN)
// Aix is not little endian.
#endif // VM_LITTLE_ENDIAN
#endif // OS_SOLARIS_VM_MUTEX_SOLARIS_INLINE_HPP
#endif // OS_CPU_AIX_PPC_VM_BYTES_AIX_PPC_INLINE_HPP


@ -34,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "memory/allocation.inline.hpp"
#include "mutex_aix.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_share_aix.hpp"
#include "prims/jniFastGetField.hpp"


@ -33,7 +33,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"


@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"


@ -35,7 +35,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"


@ -34,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"


@ -33,7 +33,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "nativeInst_sparc.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"


@ -33,7 +33,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"


@ -33,7 +33,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"


@ -34,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
#include "nativeInst_sparc.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
@ -74,7 +73,6 @@
# include <sys/systeminfo.h>
# include <sys/socket.h>
# include <sys/lwp.h>
# include <pwd.h>
# include <poll.h>
# include <sys/lwp.h>


@ -33,7 +33,6 @@
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
@ -74,7 +73,6 @@
# include <sys/socket.h>
# include <sys/trap.h>
# include <sys/lwp.h>
# include <pwd.h>
# include <poll.h>
# include <sys/lwp.h>
# include <procfs.h> // see comment in <sys/procfs.h>


@ -34,7 +34,6 @@
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "mutex_windows.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "runtime/vm_version.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
// This file contains platform-independent assembler declarations.
@ -417,24 +418,6 @@ class AbstractAssembler : public ResourceObj {
};
#ifdef TARGET_ARCH_x86
# include "assembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "assembler_aarch64.hpp"
#endif
#include CPU_HEADER(assembler)
#endif // SHARE_VM_ASM_ASSEMBLER_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,23 +27,6 @@
#include "asm/assembler.hpp"
#ifdef TARGET_ARCH_x86
# include "assembler_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "assembler_aarch64.inline.hpp"
#endif
#include CPU_HEADER_INLINE(assembler)
#endif // SHARE_VM_ASM_ASSEMBLER_INLINE_HPP


@ -28,6 +28,7 @@
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
class CodeStrings;
class PhaseCFG;
@ -633,24 +634,7 @@ class CodeBuffer: public StackObj {
// The following header contains architecture-specific implementations
#ifdef TARGET_ARCH_x86
# include "codeBuffer_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "codeBuffer_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "codeBuffer_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "codeBuffer_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "codeBuffer_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "codeBuffer_aarch64.hpp"
#endif
#include CPU_HEADER(codeBuffer)
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,24 +26,8 @@
#define SHARE_VM_ASM_MACROASSEMBLER_HPP
#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#ifdef TARGET_ARCH_x86
# include "macroAssembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "macroAssembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "macroAssembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "macroAssembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "macroAssembler_aarch64.hpp"
#endif
#include CPU_HEADER(macroAssembler)
#endif // SHARE_VM_ASM_MACROASSEMBLER_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,24 +26,6 @@
#define SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP
#include "asm/macroAssembler.hpp"
#ifdef TARGET_ARCH_x86
// no macroAssembler_x86.inline.hpp
#endif
#ifdef TARGET_ARCH_sparc
# include "macroAssembler_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "macroAssembler_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "macroAssembler_ppc.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "macroAssembler_aarch64.inline.hpp"
#endif
#include CPU_HEADER_INLINE(macroAssembler)
#endif // SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// Use AbstractRegister as shortcut
class AbstractRegisterImpl;
@ -94,25 +95,7 @@ enum { name##_##type##EnumValue = value##_##type##EnumValue }
#define REGISTER_DEFINITION(type, name) \
const type name = ((type)name##_##type##EnumValue)
#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "register_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "register_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "register_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "register_aarch64.hpp"
#endif
#include CPU_HEADER(register)
// Debugging support


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,9 @@
#ifndef SHARE_VM_C1_C1_DEFS_HPP
#define SHARE_VM_C1_C1_DEFS_HPP
#include "utilities/globalDefinitions.hpp"
#include "asm/register.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// set frame size and return address offset to these values in blobs
// (if the compiled frame uses ebp as link pointer on IA; otherwise,
@ -35,23 +36,7 @@ enum {
no_frame_size = -1
};
#ifdef TARGET_ARCH_x86
# include "c1_Defs_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_Defs_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_Defs_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_Defs_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_Defs_aarch64.hpp"
#endif
#include CPU_HEADER(c1_Defs)
// native word offsets from memory address
enum {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,26 +27,12 @@
#include "c1/c1_FrameMap.hpp"
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"
// Provides location for forward declaration of this class, which is
// only implemented on Intel
class FpuStackSim;
#ifdef TARGET_ARCH_x86
# include "c1_FpuStackSim_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FpuStackSim_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_FpuStackSim_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_FpuStackSim_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_FpuStackSim_aarch64.hpp"
#endif
#include CPU_HEADER(c1_FpuStackSim)
#endif // SHARE_VM_C1_C1_FPUSTACKSIM_HPP


@ -33,6 +33,7 @@
#include "runtime/frame.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
class ciMethod;
class CallingConvention;
@ -80,22 +81,7 @@ class FrameMap : public CompilationResourceObj {
spill_slot_size_in_bytes = 4
};
#ifdef TARGET_ARCH_x86
# include "c1_FrameMap_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_FrameMap_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_FrameMap_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_FrameMap_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_FrameMap_aarch64.hpp"
#endif
#include CPU_HEADER(c1_FrameMap)
friend class LIR_OprDesc;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "c1/c1_CodeStubs.hpp"
#include "ci/ciMethodData.hpp"
#include "oops/methodData.hpp"
#include "utilities/macros.hpp"
class Compilation;
class ScopeValue;
@ -257,21 +258,7 @@ class LIR_Assembler: public CompilationResourceObj {
void atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp);
#ifdef TARGET_ARCH_x86
# include "c1_LIRAssembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LIRAssembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_LIRAssembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_LIRAssembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_LIRAssembler_aarch64.hpp"
#endif
#include CPU_HEADER(c1_LIRAssembler)
};


@ -31,6 +31,7 @@
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "utilities/macros.hpp"
class DebugInfoCache;
class FpuStackAllocator;
@ -959,23 +960,7 @@ class LinearScanTimers : public StackObj {
#endif // ifndef PRODUCT
// Pick up platform-dependent implementation details
#ifdef TARGET_ARCH_x86
# include "c1_LinearScan_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_LinearScan_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_LinearScan_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_LinearScan_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_LinearScan_aarch64.hpp"
#endif
#include CPU_HEADER(c1_LinearScan)
#endif // SHARE_VM_C1_C1_LINEARSCAN_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "utilities/macros.hpp"
class CodeEmitInfo;
@ -47,21 +48,7 @@ class C1_MacroAssembler: public MacroAssembler {
void verify_stack_oop(int offset) PRODUCT_RETURN;
void verify_not_null_oop(Register r) PRODUCT_RETURN;
#ifdef TARGET_ARCH_x86
# include "c1_MacroAssembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_MacroAssembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_MacroAssembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_MacroAssembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_MacroAssembler_aarch64.hpp"
#endif
#include CPU_HEADER(c1_MacroAssembler)
};


@ -26,36 +26,10 @@
#define SHARE_VM_C1_C1_GLOBALS_HPP
#include "runtime/globals.hpp"
#ifdef TARGET_ARCH_x86
# include "c1_globals_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "c1_globals_sparc.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "c1_globals_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "c1_globals_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "c1_globals_aarch64.hpp"
#endif
#ifdef TARGET_OS_FAMILY_linux
# include "c1_globals_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "c1_globals_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "c1_globals_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "c1_globals_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "c1_globals_bsd.hpp"
#endif
#include "utilities/macros.hpp"
#include CPU_HEADER(c1_globals)
#include OS_HEADER(c1_globals)
//
// Defines all global flags used by the client compiler.


@ -5406,7 +5406,6 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
TempNewSymbol* parsed_name,
const Klass* host_klass,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
@ -5416,7 +5415,6 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_loader_data(loader_data),
_host_klass(host_klass),
_cp_patches(cp_patches),
_parsed_name(parsed_name),
_super_klass(),
_cp(NULL),
_fields(NULL),
@ -5657,15 +5655,6 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
Symbol* const class_name_in_cp = cp->klass_name_at(_this_class_index);
assert(class_name_in_cp != NULL, "class_name can't be null");
if (_parsed_name != NULL) {
// It's important to set parsed_name *before* resolving the super class.
// (it's used for cleanup by the caller if parsing fails)
*_parsed_name = class_name_in_cp;
// parsed_name is returned and can be used if there's an error, so add to
// its reference count. Caller will decrement the refcount.
(*_parsed_name)->increment_refcount();
}
// Update _class_name, which could previously be null,
// to reflect the name in the constant pool
_class_name = class_name_in_cp;
@ -5692,6 +5681,10 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
return;
}
// Verification prevents us from creating names with dots in them; this
// assertion checks that that is indeed the case.
assert(is_internal_format(_class_name), "external class name format used internally");
if (!is_internal()) {
if (log_is_enabled(Debug, class, preorder)){
ResourceMark rm(THREAD);
@ -5900,3 +5893,20 @@ const ClassFileStream* ClassFileParser::clone_stream() const {
return _stream->clone();
}
// ----------------------------------------------------------------------------
// debugging
#ifdef ASSERT
// return true if class_name contains no '.' (internal format is '/')
bool ClassFileParser::is_internal_format(Symbol* class_name) {
if (class_name != NULL) {
ResourceMark rm;
char* name = class_name->as_C_string();
return strchr(name, '.') == NULL;
} else {
return true;
}
}
#endif
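The predicate above distinguishes the internal class name format ("java/lang/String", '/'-separated) from the external one ("java.lang.String"). A standalone mirror of the check on plain C strings, for illustration only:

#include <cassert>
#include <cstring>

// Same check as ClassFileParser::is_internal_format, on a plain C string.
static bool internal_format(const char* name) {
  return name == NULL || strchr(name, '.') == NULL;
}

int main() {
  assert(internal_format("java/lang/String"));    // internal: '/' separators
  assert(!internal_format("java.lang.String"));   // external: '.' separators
  return 0;
}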

View File

@ -81,7 +81,6 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
mutable ClassLoaderData* _loader_data;
const Klass* _host_klass;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
TempNewSymbol* _parsed_name;
// Metadata created before the instance klass is created. Must be deallocated
// if not transferred to the InstanceKlass upon successful class loading
@ -475,7 +474,6 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
TempNewSymbol* parsed_name,
const Klass* host_klass,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
@ -514,6 +512,11 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
bool is_internal() const { return INTERNAL == _pub_level; }
static bool verify_unqualified_name(const char* name, unsigned int length, int type);
#ifdef ASSERT
static bool is_internal_format(Symbol* class_name);
#endif
};
#endif // SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP

View File

@ -141,11 +141,11 @@ PerfCounter* ClassLoader::_isUnsyncloadClass = NULL;
PerfCounter* ClassLoader::_load_instance_class_failCounter = NULL;
GrowableArray<ModuleClassPathList*>* ClassLoader::_xpatch_entries = NULL;
ClassPathEntry* ClassLoader::_first_entry = NULL;
ClassPathEntry* ClassLoader::_last_entry = NULL;
int ClassLoader::_num_entries = 0;
GrowableArray<ModuleClassPathList*>* ClassLoader::_exploded_entries = NULL;
ClassPathEntry* ClassLoader::_jrt_entry = NULL;
ClassPathEntry* ClassLoader::_first_append_entry = NULL;
bool ClassLoader::_has_jimage = false;
ClassPathEntry* ClassLoader::_last_append_entry = NULL;
int ClassLoader::_num_entries = 0;
#if INCLUDE_CDS
GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
@ -508,7 +508,7 @@ ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) {
#endif
} else {
PackageEntry* package_entry = get_package_entry(name, ClassLoaderData::the_null_class_loader_data(), THREAD);
PackageEntry* package_entry = get_package_entry(name, ClassLoaderData::the_null_class_loader_data(), CHECK_NULL);
if (package_entry != NULL) {
ResourceMark rm;
// Get the module name
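The THREAD to CHECK_NULL change in open_stream above is a behavioral fix, not just a cleanup: with a bare THREAD argument, an exception pending after get_package_entry would be silently carried forward, whereas CHECK_NULL returns NULL immediately. A simplified sketch of the macro trick from utilities/exceptions.hpp (the real definitions carry a bit more):

// f(args, CHECK_NULL);   expands, roughly, to:
//   f(args, THREAD); if (HAS_PENDING_EXCEPTION) return NULL;
#define CHECK_NULL THREAD); if (HAS_PENDING_EXCEPTION) return NULL; (void)(0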
@ -651,7 +651,6 @@ void ClassLoader::check_shared_classpath(const char *path) {
#endif
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
const char* sys_class_path = Arguments::get_sysclasspath();
const char* java_class_path = Arguments::get_appclasspath();
if (PrintSharedArchiveAndExit) {
@ -694,7 +693,10 @@ void ClassLoader::setup_xpatch_entries() {
GrowableArray<ModuleXPatchPath*>* xpatch_args = Arguments::get_xpatchprefix();
int num_of_entries = xpatch_args->length();
// Set up the boot loader's xpatch_entries list
assert(!DumpSharedSpaces, "DumpSharedSpaces not supported with -Xpatch");
assert(!UseSharedSpaces, "UseSharedSpaces not supported with -Xpatch");
// Set up the boot loader's _xpatch_entries list
_xpatch_entries = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleClassPathList*>(num_of_entries, true);
for (int i = 0; i < num_of_entries; i++) {
@ -742,10 +744,9 @@ void ClassLoader::setup_xpatch_entries() {
}
void ClassLoader::setup_search_path(const char *class_path, bool bootstrap_search) {
int offset = 0;
int len = (int)strlen(class_path);
int end = 0;
bool mark_append_entry = false;
bool set_base_piece = bootstrap_search;
// Iterate over class path entries
for (int start = 0; start < len; start = end) {
@ -754,21 +755,45 @@ void ClassLoader::setup_search_path(const char *class_path, bool bootstrap_searc
}
EXCEPTION_MARK;
ResourceMark rm(THREAD);
mark_append_entry = (mark_append_entry ||
(bootstrap_search && (start == Arguments::bootclassloader_append_index())));
char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
strncpy(path, &class_path[start], end - start);
path[end - start] = '\0';
update_class_path_entry_list(path, false, mark_append_entry, false, bootstrap_search);
// Check on the state of the boot loader's append path
if (mark_append_entry && (_first_append_entry == NULL)) {
// Failure to mark the first append entry, most likely
// due to a non-existent path. Record the next entry
// as the first boot loader append entry.
mark_append_entry = true;
// The first time through the bootstrap_search setup, it must be determined
// what the base or core piece of the boot loader search is. Either a java runtime
// image is present or this is an exploded module build.
if (set_base_piece) {
assert(string_ends_with(path, MODULES_IMAGE_NAME) || string_ends_with(path, "java.base"),
"Incorrect boot loader search path, no java runtime image or java.base exploded build");
struct stat st;
if (os::stat(path, &st) == 0) {
// Directory found
Thread* THREAD = Thread::current();
ClassPathEntry* new_entry = create_class_path_entry(path, &st, false, false, CHECK);
// Check for a jimage
if (Arguments::has_jimage()) {
assert(_jrt_entry == NULL, "should not setup bootstrap class search path twice");
assert(new_entry != NULL && new_entry->is_jrt(), "No java runtime image present");
_jrt_entry = new_entry;
++_num_entries;
#if INCLUDE_CDS
if (DumpSharedSpaces) {
JImageFile *jimage = _jrt_entry->jimage();
assert(jimage != NULL, "No java runtime image file present");
ClassLoader::initialize_module_loader_map(jimage);
}
#endif
}
} else {
// If path does not exist, exit
vm_exit_during_initialization("Unable to establish the boot loader search path", path);
}
set_base_piece = false;
} else {
mark_append_entry = false;
// Every entry on the system boot class path after the initial base piece,
// which is set by os::set_boot_path(), is considered an appended entry.
update_class_path_entry_list(path, false, bootstrap_search);
}
#if INCLUDE_CDS
@ -782,6 +807,45 @@ void ClassLoader::setup_search_path(const char *class_path, bool bootstrap_searc
}
}
// During an exploded modules build, each module defined to the boot loader
// will be added to the ClassLoader::_exploded_entries array.
void ClassLoader::add_to_exploded_build_list(Symbol* module_sym, TRAPS) {
assert(!ClassLoader::has_jrt_entry(), "Exploded build not applicable");
// Set up the boot loader's _exploded_entries list
if (_exploded_entries == NULL) {
_exploded_entries = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleClassPathList*>(EXPLODED_ENTRY_SIZE, true);
}
// Find the module's symbol
ResourceMark rm(THREAD);
const char *module_name = module_sym->as_C_string();
const char *home = Arguments::get_java_home();
const char file_sep = os::file_separator()[0];
// 10 represents the length of "modules" + 2 file separators + \0
size_t len = strlen(home) + strlen(module_name) + 10;
char *path = NEW_C_HEAP_ARRAY(char, len, mtModule);
jio_snprintf(path, len, "%s%cmodules%c%s", home, file_sep, file_sep, module_name);
struct stat st;
if (os::stat(path, &st) == 0) {
// Directory found
ClassPathEntry* new_entry = create_class_path_entry(path, &st, false, false, CHECK);
// If the path specification is valid, enter it into this module's list.
// There is no need to check for duplicate modules in the exploded entry list,
// since no two modules with the same name can be defined to the boot loader.
// This is checked at module definition time in Modules::define_module.
if (new_entry != NULL) {
ModuleClassPathList* module_cpl = new ModuleClassPathList(module_sym);
module_cpl->add_to_list(new_entry);
_exploded_entries->push(module_cpl);
log_info(class, load)("path: %s", path);
}
}
FREE_C_HEAP_ARRAY(char, path);
}
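The fixed pad of 10 in the length computation covers strlen("modules") (7), the two file separators, and the trailing NUL. A standalone check of that arithmetic, with hypothetical values for java.home and the module name:

#include <cstdio>
#include <cstring>

int main() {
  const char* home = "/opt/jdk";          // hypothetical java.home
  const char* module_name = "java.base";
  // 10 = strlen("modules") (7) + 2 file separators + 1 trailing '\0'
  size_t len = strlen(home) + strlen(module_name) + 10;  // 8 + 9 + 10 = 27
  char path[64];
  snprintf(path, sizeof(path), "%s/modules/%s", home, module_name);
  printf("\"%s\" occupies %zu bytes; len = %zu\n", path, strlen(path) + 1, len);
  return 0;
}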
ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st,
bool throw_exception,
bool is_boot_append, TRAPS) {
@ -872,21 +936,9 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo
return NULL;
}
// The boot class loader must adhere to specific visibility rules.
// Prior to loading a class in a named package, the package is checked
// to see if it is in a module defined to the boot loader. If the
// package is not in a module defined to the boot loader, the class
// must be loaded only in the boot loader's append path, which
// consists of [-Xbootclasspath/a]; [jvmti appended entries]
void ClassLoader::set_first_append_entry(ClassPathEntry *new_entry) {
if (_first_append_entry == NULL) {
_first_append_entry = new_entry;
}
}
// returns true if entry already on class path
bool ClassLoader::contains_entry(ClassPathEntry *entry) {
ClassPathEntry* e = _first_entry;
ClassPathEntry* e = _first_append_entry;
while (e != NULL) {
// assume zip entries have been canonicalized
if (strcmp(entry->name(), e->name()) == 0) {
@ -899,41 +951,24 @@ bool ClassLoader::contains_entry(ClassPathEntry *entry) {
void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
if (new_entry != NULL) {
if (_last_entry == NULL) {
_first_entry = _last_entry = new_entry;
if (_last_append_entry == NULL) {
assert(_first_append_entry == NULL, "boot loader's append class path entry list not empty");
_first_append_entry = _last_append_entry = new_entry;
} else {
_last_entry->set_next(new_entry);
_last_entry = new_entry;
_last_append_entry->set_next(new_entry);
_last_append_entry = new_entry;
}
}
_num_entries ++;
}
void ClassLoader::prepend_to_list(ClassPathEntry *new_entry) {
if (new_entry != NULL) {
if (_last_entry == NULL) {
_first_entry = _last_entry = new_entry;
} else {
new_entry->set_next(_first_entry);
_first_entry = new_entry;
}
}
_num_entries ++;
_num_entries++;
}
void ClassLoader::add_to_list(const char *apath) {
update_class_path_entry_list((char*)apath, false, false, false, false);
}
void ClassLoader::prepend_to_list(const char *apath) {
update_class_path_entry_list((char*)apath, false, false, true, false);
update_class_path_entry_list((char*)apath, false, false);
}
// Returns true IFF the file/dir exists and the entry was successfully created.
bool ClassLoader::update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool mark_append_entry,
bool prepend_entry,
bool is_boot_append,
bool throw_exception) {
struct stat st;
@ -946,19 +981,10 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
return false;
}
// Ensure that the first boot loader append entry will always be set correctly.
assert((!mark_append_entry ||
(mark_append_entry && (!check_for_duplicates || !contains_entry(new_entry)))),
"failed to mark boot loader's first append boundary");
// Do not reorder the bootclasspath; doing so would break get_system_package().
// Add new entry to linked list
if (!check_for_duplicates || !contains_entry(new_entry)) {
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry, prepend_entry);
if (mark_append_entry) {
set_first_append_entry(new_entry);
}
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
}
return true;
} else {
@ -971,30 +997,47 @@ bool ClassLoader::update_class_path_entry_list(const char *path,
}
}
static void print_module_entry_table(const GrowableArray<ModuleClassPathList*>* const module_list) {
ResourceMark rm;
int num_of_entries = module_list->length();
for (int i = 0; i < num_of_entries; i++) {
ClassPathEntry* e;
ModuleClassPathList* mpl = module_list->at(i);
tty->print("%s=", mpl->module_name()->as_C_string());
e = mpl->module_first_entry();
while (e != NULL) {
tty->print("%s", e->name());
e = e->next();
if (e != NULL) {
tty->print("%s", os::path_separator());
}
}
tty->print(" ;");
}
}
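Given the format strings above (module name, '=', entries joined by the path separator, then ' ;'), two hypothetical -Xpatch pairs would print along the lines of:

java.base=/patches/base_v1:/patches/base_v2 ;java.sql=/patches/sql ;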
void ClassLoader::print_bootclasspath() {
ClassPathEntry* e;
tty->print("[bootclasspath= ");
// Print -Xpatch module/path specifications first
if (_xpatch_entries != NULL) {
ResourceMark rm;
int num_of_entries = _xpatch_entries->length();
for (int i = 0; i < num_of_entries; i++) {
ModuleClassPathList* mpl = _xpatch_entries->at(i);
tty->print("%s=", mpl->module_name()->as_C_string());
e = mpl->module_first_entry();
while (e != NULL) {
tty->print("%s", e->name());
e = e->next();
if (e != NULL) {
tty->print("%s", os::path_separator());
}
}
tty->print(" ;");
print_module_entry_table(_xpatch_entries);
}
// [jimage | exploded modules build]
if (has_jrt_entry()) {
// Print the location of the java runtime image
tty->print("%s ;", _jrt_entry->name());
} else {
// Print exploded module build path specifications
if (_exploded_entries != NULL) {
print_module_entry_table(_exploded_entries);
}
}
e = _first_entry;
// appended entries
e = _first_append_entry;
while (e != NULL) {
tty->print("%s ;", e->name());
e = e->next();
@ -1298,6 +1341,60 @@ const char* ClassLoader::file_name_for_class_name(const char* class_name,
return file_name;
}
// Search either the -Xpatch or exploded build entries for a class
ClassFileStream* ClassLoader::search_module_entries(const GrowableArray<ModuleClassPathList*>* const module_list,
const char* const class_name, const char* const file_name, TRAPS) {
ClassFileStream* stream = NULL;
// Find the class' defining module in the boot loader's module entry table
PackageEntry* pkg_entry = get_package_entry(class_name, ClassLoaderData::the_null_class_loader_data(), CHECK_NULL);
ModuleEntry* mod_entry = (pkg_entry != NULL) ? pkg_entry->module() : NULL;
// If the module system has not defined java.base yet, then
// classes loaded are assumed to be defined to java.base.
// When java.base is eventually defined by the module system,
// all packages of classes that have been previously loaded
// are verified in ModuleEntryTable::verify_javabase_packages().
if (!Universe::is_module_initialized() &&
!ModuleEntryTable::javabase_defined() &&
mod_entry == NULL) {
mod_entry = ModuleEntryTable::javabase_module();
}
// The module must be a named module
if (mod_entry != NULL && mod_entry->is_named()) {
int num_of_entries = module_list->length();
const Symbol* class_module_name = mod_entry->name();
// Loop through all the modules in either the xpatch or exploded entries looking for module
for (int i = 0; i < num_of_entries; i++) {
ModuleClassPathList* module_cpl = module_list->at(i);
Symbol* module_cpl_name = module_cpl->module_name();
if (module_cpl_name->fast_compare(class_module_name) == 0) {
// Class' module has been located, attempt to load
// the class from the module's ClassPathEntry list.
ClassPathEntry* e = module_cpl->module_first_entry();
while (e != NULL) {
stream = e->open_stream(file_name, CHECK_NULL);
// No context.check is required since CDS is not supported
// for an exploded modules build or if -Xpatch is specified.
if (NULL != stream) {
return stream;
}
e = e->next();
}
// If the module was located, break out even if the class was not
// located successfully from that module's ClassPathEntry list.
// There will not be another valid entry for that module.
return NULL;
}
}
}
return NULL;
}
instanceKlassHandle ClassLoader::load_class(Symbol* name, bool search_append_only, TRAPS) {
assert(name != NULL, "invariant");
assert(THREAD->is_Java_thread(), "must be a JavaThread");
@ -1321,18 +1418,19 @@ instanceKlassHandle ClassLoader::load_class(Symbol* name, bool search_append_onl
s2 classpath_index = 0;
ClassPathEntry* e = NULL;
// If DumpSharedSpaces is true, boot loader visibility boundaries are set
// to be _first_entry to the end (all path entries). No -Xpatch entries are
// included since CDS and AppCDS are not supported if -Xpatch is specified.
// If DumpSharedSpaces is true, boot loader visibility boundaries are set to:
// - [jimage] + [_first_append_entry to _last_append_entry] (all path entries).
// No -Xpatch entries or exploded module builds are included since CDS
// is not supported if -Xpatch or exploded module builds are used.
//
// If search_append_only is true, boot loader visibility boundaries are
// set to be _first_append_entry to the end. This includes:
// [-Xbootclasspath/a]; [jvmti appended entries]
//
// If both DumpSharedSpaces and search_append_only are false, boot loader
// visibility boundaries are set to be _first_entry to the entry before
// the _first_append_entry. This would include:
// [-Xpatch:<module>=<file>(<pathsep><file>)*]; [exploded build | jimage]
// visibility boundaries are set to be the -Xpatch entries plus the base piece.
// This would include:
// [-Xpatch:<module>=<file>(<pathsep><file>)*]; [jimage | exploded module build]
//
// DumpSharedSpaces and search_append_only are mutually exclusive and cannot
// be true at the same time.
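Reading these boundaries against the guard conditions of the three Load Attempts below, the searched pieces per mode work out as follows (summarized here for reference):

// mode                   #1 -Xpatch   #2 jimage/exploded   #3 append path
// normal load                yes              yes                no
// search_append_only         no               no                 yes
// DumpSharedSpaces           no               yes                yes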
@ -1341,85 +1439,37 @@ instanceKlassHandle ClassLoader::load_class(Symbol* name, bool search_append_onl
// Load Attempt #1: -Xpatch
// Determine the class' defining module. If it appears in the _xpatch_entries,
// attempt to load the class from those locations specific to the module.
// An -Xpatch specification may contain only a subset of the classes
// that make up the overall module definition. So if a particular class is not
// found within its module specification, the search should continue to Load Attempt #2.
// Note: The -Xpatch entries are never searched if the boot loader's
// visibility boundary is limited to only searching the append entries.
if (_xpatch_entries != NULL && !search_append_only && !DumpSharedSpaces) {
// Find the module in the boot loader's module entry table
PackageEntry* pkg_entry = get_package_entry(class_name, ClassLoaderData::the_null_class_loader_data(), THREAD);
ModuleEntry* mod_entry = (pkg_entry != NULL) ? pkg_entry->module() : NULL;
// If the module system has not defined java.base yet, then
// classes loaded are assumed to be defined to java.base.
// When java.base is eventually defined by the module system,
// all packages of classes that have been previously loaded
// are verified in ModuleEntryTable::verify_javabase_packages().
if (!Universe::is_module_initialized() &&
!ModuleEntryTable::javabase_defined() &&
mod_entry == NULL) {
mod_entry = ModuleEntryTable::javabase_module();
}
// The module must be a named module
if (mod_entry != NULL && mod_entry->is_named()) {
int num_of_entries = _xpatch_entries->length();
const Symbol* class_module_name = mod_entry->name();
// Loop through all the xpatch entries looking for module
for (int i = 0; i < num_of_entries; i++) {
ModuleClassPathList* module_cpl = _xpatch_entries->at(i);
Symbol* module_cpl_name = module_cpl->module_name();
if (module_cpl_name->fast_compare(class_module_name) == 0) {
// Class' module has been located, attempt to load
// the class from the module's ClassPathEntry list.
e = module_cpl->module_first_entry();
while (e != NULL) {
stream = e->open_stream(file_name, CHECK_NULL);
// No context.check is required since both CDS
// and AppCDS are turned off if -Xpatch is specified.
if (NULL != stream) {
break;
}
e = e->next();
}
// If the module was located in the xpatch entries, break out
// even if the class was not located successfully from that module's
// ClassPathEntry list. There will not be another valid entry for
// that module in the _xpatch_entries array.
break;
}
}
}
stream = search_module_entries(_xpatch_entries, class_name, file_name, CHECK_NULL);
}
// Load Attempt #2: [exploded build | jimage]
// Load Attempt #2: [jimage | exploded build]
if (!search_append_only && (NULL == stream)) {
e = _first_entry;
while ((e != NULL) && (e != _first_append_entry)) {
stream = e->open_stream(file_name, CHECK_NULL);
if (has_jrt_entry()) {
e = _jrt_entry;
stream = _jrt_entry->open_stream(file_name, CHECK_NULL);
if (!context.check(stream, classpath_index)) {
return NULL;
}
if (NULL != stream) {
break;
}
e = e->next();
++classpath_index;
} else {
// Exploded build - attempt to locate class in its defining module's location.
assert(_exploded_entries != NULL, "No exploded build entries present");
stream = search_module_entries(_exploded_entries, class_name, file_name, CHECK_NULL);
}
}
// Load Attempt #3: [-Xbootclasspath/a]; [jvmti appended entries]
if ((search_append_only || DumpSharedSpaces) && (NULL == stream)) {
// For the boot loader append path search, must calculate
// the starting classpath_index prior to attempting to
// load the classfile.
if (search_append_only) {
ClassPathEntry *tmp_e = _first_entry;
while ((tmp_e != NULL) && (tmp_e != _first_append_entry)) {
tmp_e = tmp_e->next();
++classpath_index;
}
}
// For the boot loader append path search, the starting classpath_index
// for the appended piece is always 1 to account for either the
// _jrt_entry or the _exploded_entries.
assert(classpath_index == 0, "The classpath_index has been incremented incorrectly");
classpath_index = 1;
e = _first_append_entry;
while (e != NULL) {
@ -1453,7 +1503,6 @@ instanceKlassHandle ClassLoader::load_class(Symbol* name, bool search_append_onl
protection_domain,
NULL, // host_klass
NULL, // cp_patches
NULL, // parsed_name
THREAD);
if (HAS_PENDING_EXCEPTION) {
if (DumpSharedSpaces) {
@ -1597,16 +1646,25 @@ void classLoader_init1() {
}
// Complete the ClassPathEntry setup for the boot loader
void classLoader_init2() {
void ClassLoader::classLoader_init2(TRAPS) {
// Create the moduleEntry for java.base
create_javabase();
// Set up the list of module/path pairs for -Xpatch processing
// This must be done after the SymbolTable is created in order
// to use fast_compare on module names instead of a string compare.
if (Arguments::get_xpatchprefix() != NULL) {
ClassLoader::setup_xpatch_entries();
setup_xpatch_entries();
}
// Determine if this is an exploded build
ClassLoader::set_has_jimage();
// Set up the initial java.base/path pair for the exploded build entries.
// As more modules are defined during module system initialization, more
// entries will be added to the exploded build array.
if (!has_jrt_entry()) {
assert(!DumpSharedSpaces, "DumpSharedSpaces not supported with exploded module builds");
assert(!UseSharedSpaces, "UseSharedSpaces not supported with exploded module builds");
add_to_exploded_build_list(vmSymbols::java_base(), CHECK);
}
}
@ -1654,26 +1712,6 @@ void ClassLoader::create_javabase() {
}
}
void ClassLoader::set_has_jimage() {
// Determine if this is an exploded build. When looking for
// the jimage file, only search the piece of the boot
// loader's boot class path which contains [exploded build | jimage].
// Do not search the boot loader's xpatch entries or append path.
ClassPathEntry* e = _first_entry;
ClassPathEntry* last_e = _first_append_entry;
while ((e != NULL) && (e != last_e)) {
JImageFile *jimage = e->jimage();
if (jimage != NULL && e->is_jrt()) {
_has_jimage = true;
#if INCLUDE_CDS
ClassLoader::initialize_module_loader_map(jimage);
#endif
return;
}
e = e->next();
}
}
#ifndef PRODUCT
// CompileTheWorld
@ -1762,14 +1800,19 @@ void ClassLoader::compile_the_world() {
HandleMark hm(THREAD);
ResourceMark rm(THREAD);
assert(has_jrt_entry(), "Compile The World not supported with exploded module build");
// Find bootstrap loader
Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
// Iterate over all bootstrap class path entries
ClassPathEntry* e = _first_entry;
jlong start = os::javaTimeMillis();
// Compile the world for the modular java runtime image
_jrt_entry->compile_the_world(system_class_loader, CATCH);
// Iterate over all bootstrap class path appended entries
ClassPathEntry* e = _first_append_entry;
while (e != NULL) {
// We stop at "modules" jimage, unless it is the first bootstrap path entry
if (e->is_jrt() && e != _first_entry) break;
assert(!e->is_jrt(), "A modular java runtime image is present on the list of appended entries");
e->compile_the_world(system_class_loader, CATCH);
e = e->next();
}

View File

@ -216,33 +216,34 @@ class ClassLoader: AllStatic {
// 1. the module/path pairs specified to -Xpatch
// -Xpatch:<module>=<file>(<pathsep><file>)*
// 2. the base piece
// [exploded build | jimage]
// [jimage | build with exploded modules]
// 3. boot loader append path
// [-Xbootclasspath/a]; [jvmti appended entries]
//
// The boot loader must obey this order when attempting
// to load a class.
// Contains the module/path pairs specified to -Xpatch
// 1. Contains the module/path pairs specified to -Xpatch
static GrowableArray<ModuleClassPathList*>* _xpatch_entries;
// Contains the ClassPathEntry instances that include
// both the base piece and the boot loader append path.
static ClassPathEntry* _first_entry;
// Last entry in linked list of ClassPathEntry instances
static ClassPathEntry* _last_entry;
static int _num_entries;
// 2. the base piece
// Contains the ClassPathEntry of the modular java runtime image.
// If no java runtime image is present, this indicates a
// build with exploded modules is being used instead.
static ClassPathEntry* _jrt_entry;
static GrowableArray<ModuleClassPathList*>* _exploded_entries;
enum { EXPLODED_ENTRY_SIZE = 80 }; // Initial number of exploded modules
// Marks the start of:
// - the boot loader's append path
// [-Xbootclasspath/a]; [jvmti appended entries]
// within the linked list of ClassPathEntry instances.
// 3. the boot loader's append path
// [-Xbootclasspath/a]; [jvmti appended entries]
// Note: boot loader append path does not support named modules.
static ClassPathEntry* _first_append_entry;
// Last entry in linked list of appended ClassPathEntry instances
static ClassPathEntry* _last_append_entry;
static const char* _shared_archive;
// True if the boot path has a "modules" jimage
static bool _has_jimage;
// Note: _num_entries includes the java runtime image and all
// the entries on the _first_append_entry linked list.
static int _num_entries;
// Array of module names associated with the boot class loader
CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
@ -253,9 +254,14 @@ class ClassLoader: AllStatic {
// Info used by CDS
CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
// Initialization
// Initialization:
// - setup the boot loader's system class path
// - setup the boot loader's xpatch entries, if present
// - create the ModuleEntry for java.base
static void setup_bootstrap_search_path();
static void setup_search_path(const char *class_path, bool setting_bootstrap);
static void setup_xpatch_entries();
static void create_javabase();
static void load_zip_library();
static void load_jimage_library();
@ -285,8 +291,6 @@ class ClassLoader: AllStatic {
static int crc32(int crc, const char* buf, int len);
static bool update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool mark_append_entry,
bool prepend_entry,
bool is_boot_append,
bool throw_exception=true);
static void print_bootclasspath();
@ -352,15 +356,17 @@ class ClassLoader: AllStatic {
return _load_instance_class_failCounter;
}
// Set up the module/path pairs as specified to -Xpatch
static void setup_xpatch_entries();
// Whether a modular java runtime image is present vs. a build with exploded modules
static bool has_jrt_entry() { return (_jrt_entry != NULL); }
static ClassPathEntry* get_jrt_entry() { return _jrt_entry; }
// Sets _has_jimage to TRUE if "modules" jimage file exists
static void set_has_jimage();
static bool has_jimage() { return _has_jimage; }
// Add a module's exploded directory to the boot loader's exploded module build list
static void add_to_exploded_build_list(Symbol* module_name, TRAPS);
// Create the ModuleEntry for java.base
static void create_javabase();
// Attempt to load an individual class from either the -Xpatch or exploded modules build lists
static ClassFileStream* search_module_entries(const GrowableArray<ModuleClassPathList*>* const module_list,
const char* const class_name,
const char* const file_name, TRAPS);
// Load individual .class file
static instanceKlassHandle load_class(Symbol* class_name, bool search_append_only, TRAPS);
@ -381,17 +387,28 @@ class ClassLoader: AllStatic {
// Initialization
static void initialize();
static void classLoader_init2(TRAPS);
CDS_ONLY(static void initialize_shared_path();)
static int compute_Object_vtable();
static ClassPathEntry* classpath_entry(int n) {
ClassPathEntry* e = ClassLoader::_first_entry;
while (--n >= 0) {
assert(e != NULL, "Not that many classpath entries.");
e = e->next();
if (n == 0) {
assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
return ClassLoader::_jrt_entry;
} else {
// The java runtime image is always the first entry
// in the FileMapInfo::_classpath_entry_table. Even though
// the _jrt_entry is not included in the _first_append_entry
// linked list, it must be accounted for when comparing the
// class path vs. the shared archive class path.
ClassPathEntry* e = ClassLoader::_first_append_entry;
while (--n >= 1) {
assert(e != NULL, "Not that many classpath entries.");
e = e->next();
}
return e;
}
return e;
}
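A standalone model of the index accounting above, with slot 0 reserved for the runtime image and the append list supplying slots 1..n (simplified, hypothetical types, not HotSpot code):

struct Entry { Entry* next; };

// Mirrors classpath_entry() for the jimage case.
Entry* entry_at(Entry* jrt, Entry* first_append, int n) {
  if (n == 0) return jrt;           // the runtime image is always index 0
  Entry* e = first_append;
  while (--n >= 1) e = e->next;     // index 1 maps to the first appended entry
  return e;
}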
#if INCLUDE_CDS
@ -429,18 +446,12 @@ class ClassLoader: AllStatic {
// adds a class path list
static void add_to_list(ClassPathEntry* new_entry);
// prepends a class path list
static void prepend_to_list(ClassPathEntry* new_entry);
// creates a class path zip entry (returns NULL if JAR file cannot be opened)
static ClassPathZipEntry* create_class_path_zip_entry(const char *apath, bool is_boot_append);
// add a path to class path list
static void add_to_list(const char* apath);
// prepend a path to class path list
static void prepend_to_list(const char* apath);
static bool string_ends_with(const char* str, const char* str_to_find);
// obtain package name from a fully qualified class name

View File

@ -71,22 +71,11 @@ public:
static void add_class_path_entry(const char* path, bool check_for_duplicates,
ClassPathEntry* new_entry, bool prepend_entry) {
if (prepend_entry) {
ClassLoader::prepend_to_list(new_entry);
} else {
ClassLoader::add_to_list(new_entry);
}
ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
static void append_boot_classpath(ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
// During jvmti live phase an entry can be appended to the boot
// loader's ClassPathEntry instances. Need to mark the start
// of the boot loader's append path in case there was no reason
// to mark it initially in setup_bootstrap_search_path.
if (ClassLoader::_first_append_entry == NULL) {
ClassLoader::set_first_append_entry(new_entry);
}
}
static void setup_search_paths() {}
static bool is_boot_classpath(int classpath_index) {

View File

@ -396,14 +396,15 @@ void Dictionary::add_klass(Symbol* class_name, ClassLoaderData* loader_data,
DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash,
Symbol* class_name,
ClassLoaderData* loader_data) {
debug_only(_lookup_count++);
DEBUG_ONLY(_lookup_count++);
for (DictionaryEntry* entry = bucket(index);
entry != NULL;
entry = entry->next()) {
if (entry->hash() == hash && entry->equals(class_name, loader_data)) {
DEBUG_ONLY(bucket_count_hit(index));
return entry;
}
debug_only(_lookup_length++);
DEBUG_ONLY(_lookup_length++);
}
return NULL;
}
@ -596,7 +597,7 @@ void ProtectionDomainCacheTable::verify() {
}
guarantee(number_of_entries() == element_count,
"Verify of protection domain cache table failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
DEBUG_ONLY(verify_lookup_length((double)number_of_entries() / table_size()));
}
void ProtectionDomainCacheEntry::verify() {
@ -737,19 +738,65 @@ void Dictionary::print(bool details) {
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
tty->print_cr("1st number: th bucket index");
tty->print_cr("2nd number: the entry's index within this bucket");
#ifdef ASSERT
tty->print_cr("3rd number: the hit percentage of this entry");
tty->print_cr("4th number: the hash index of this entry");
#endif
}
#ifdef ASSERT
// find top buckets with highest lookup count
#define TOP_COUNT 16
int topItemsIndicies[TOP_COUNT];
for (int i = 0; i < TOP_COUNT; i++) {
topItemsIndicies[i] = i;
}
double total = 0.0;
for (int i = 0; i < table_size(); i++) {
// accumulate the total lookup count, so later on we can
// express each bucket's lookup count as a percentage of all lookups
unsigned value = bucket_hits(i);
total += value;
// find the entry with min value
int index = 0;
unsigned min = bucket_hits(topItemsIndicies[index]);
for (int j = 1; j < TOP_COUNT; j++) {
if (bucket_hits(topItemsIndicies[j]) < min) {
min = bucket_hits(topItemsIndicies[j]);
index = j;
}
}
// if the bucket's lookup count is bigger than the current min,
// move that bucket's index into the top list
if (value > min) {
topItemsIndicies[index] = i;
}
}
#endif
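The ASSERT-only scan above keeps the TOP_COUNT busiest buckets by repeatedly evicting the minimum of the tracked set. A minimal standalone variant of that selection, O(n * K) with unsorted output; this sketch starts the scan after the seeded prefix so no index can be tracked twice:

#include <cstdio>

const int K = 4;  // stand-in for TOP_COUNT

// Stores the indices of the K largest values of v[0..n) into top[] (unsorted).
void find_top(const unsigned* v, int n, int* top) {
  for (int i = 0; i < K; i++) top[i] = i;            // seed with the first K indices
  for (int i = K; i < n; i++) {
    int min_slot = 0;                                // slot holding the smallest tracked value
    for (int j = 1; j < K; j++)
      if (v[top[j]] < v[top[min_slot]]) min_slot = j;
    if (v[i] > v[top[min_slot]]) top[min_slot] = i;  // evict the current minimum
  }
}

int main() {
  unsigned hits[] = { 3, 9, 1, 7, 4, 8, 2, 6 };
  int top[K];
  find_top(hits, 8, top);
  for (int i = 0; i < K; i++) printf("bucket %d: %u hits\n", top[i], hits[top[i]]);
  return 0;
}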
for (int index = 0; index < table_size(); index++) {
#ifdef ASSERT
double percentage = 100.0 * (double)bucket_hits(index)/total;
#endif
int chain = 0;
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
if (Verbose) tty->print("%4d: ", index);
Klass* e = probe->klass();
ClassLoaderData* loader_data = probe->loader_data();
bool is_defining_class =
(loader_data == e->class_loader_data());
if (details) {
tty->print("%4d: %3d: ", index, chain);
#ifdef ASSERT
tty->print("%5.2f%%: %10u:", percentage, probe->hash());
#endif
}
tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
e->external_name());
e->external_name());
if (details) {
tty->print(", loader ");
@ -760,9 +807,30 @@ void Dictionary::print(bool details) {
}
}
tty->cr();
chain++;
}
if (details && (chain == 0)) {
tty->print("%4d:", index);
tty->cr();
}
}
#ifdef ASSERT
// print out the TOP_COUNT buckets with the highest lookup counts (unsorted)
if (details) {
tty->cr();
tty->print("Top %d buckets:", TOP_COUNT);
tty->cr();
for (int i = 0; i < TOP_COUNT; i++) {
tty->print("%4d: hits %5.2f%%",
topItemsIndicies[i],
100.0*(double)bucket_hits(topItemsIndicies[i])/total);
tty->cr();
}
}
#endif
if (details) {
tty->cr();
_pd_cache_table->print();
@ -795,7 +863,7 @@ void Dictionary::verify() {
}
guarantee(number_of_entries() == element_count,
"Verify of system dictionary failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
DEBUG_ONLY(if (!verify_lookup_length((double)number_of_entries() / table_size())) this->print(true));
_pd_cache_table->verify();
}

View File

@ -777,9 +777,6 @@ class java_lang_reflect_Module {
static Handle create(Handle loader, Handle module_name, TRAPS);
// Testers
static bool is_subclass(Klass* klass) {
return klass->is_subclass_of(SystemDictionary::reflect_Module_klass());
}
static bool is_instance(oop obj);
// Accessors

View File

@ -168,11 +168,8 @@ inline bool java_lang_invoke_DirectMethodHandle::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
inline bool java_lang_reflect_Module::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
return obj != NULL && obj->klass() == SystemDictionary::reflect_Module_klass();
}
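The switch from is_subclass to an exact klass compare is presumably safe because java.lang.reflect.Module is a final class, so no subclass can exist; the pointer compare also avoids walking the superclass chain. A standalone model of the difference (simplified types, not HotSpot code):

#include <cstddef>

struct Klass { const Klass* super; };

// Subclass test: walk the superclass chain (what is_subclass amounted to).
bool is_subclass_of(const Klass* k, const Klass* target) {
  for (const Klass* c = k; c != NULL; c = c->super)
    if (c == target) return true;
  return false;
}

// Exact-type test: one pointer compare (what is_instance does now).
bool is_exactly(const Klass* k, const Klass* target) { return k == target; }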
inline int Backtrace::merge_bci_and_version(int bci, int version) {

View File

@ -96,7 +96,6 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
Handle protection_domain,
const Klass* host_klass,
GrowableArray<Handle>* cp_patches,
TempNewSymbol* parsed_name,
TRAPS) {
assert(stream != NULL, "invariant");
@ -123,7 +122,6 @@ instanceKlassHandle KlassFactory::create_from_stream(ClassFileStream* stream,
name,
loader_data,
protection_domain,
parsed_name,
host_klass,
cp_patches,
ClassFileParser::BROADCAST, // publicity level

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,6 @@ class KlassFactory : AllStatic {
Handle protection_domain,
const Klass* host_klass,
GrowableArray<Handle>* cp_patches,
TempNewSymbol* parsed_name,
TRAPS);
};

View File

@ -102,7 +102,8 @@ static PackageEntryTable* get_package_entry_table(Handle h_loader, TRAPS) {
static ModuleEntry* get_module_entry(jobject module, TRAPS) {
Handle module_h(THREAD, JNIHandles::resolve(module));
if (!java_lang_reflect_Module::is_instance(module_h())) {
THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(), "Bad module object");
THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
"module is not an instance of type java.lang.reflect.Module");
}
return java_lang_reflect_Module::module_entry(module_h(), CHECK_NULL);
}
@ -133,36 +134,6 @@ static PackageEntry* get_package_entry_by_name(Symbol* package,
return NULL;
}
// If using exploded build, append <java.home>/modules/module_name, if it exists,
// to the system boot class path in order for the boot loader to locate class files.
static void add_to_exploded_build_list(char *module_name, TRAPS) {
assert(!ClassLoader::has_jimage(), "Exploded build not applicable");
// java.base is handled by os::set_boot_path
assert(strcmp(module_name, "java.base") != 0, "Unexpected java.base module name");
char file_sep = os::file_separator()[0];
size_t module_len = strlen(module_name);
const char* home = Arguments::get_java_home();
size_t len = strlen(home) + module_len + 32;
char* path = NEW_C_HEAP_ARRAY(char, len, mtModule);
jio_snprintf(path, len, "%s%cmodules%c%s", home, file_sep, file_sep, module_name);
struct stat st;
// See if exploded module path exists
if ((os::stat(path, &st) != 0)) {
FREE_C_HEAP_ARRAY(char, path);
path = NULL;
}
if (path != NULL) {
HandleMark hm;
Handle loader_lock = Handle(THREAD, SystemDictionary::system_loader_lock());
ObjectLocker ol(loader_lock, THREAD);
log_info(class, load)("opened: %s", path);
ClassLoader::add_to_list(path);
}
}
bool Modules::is_package_defined(Symbol* package, Handle h_loader, TRAPS) {
PackageEntry* res = get_package_entry_by_name(package, h_loader, CHECK_false);
return res != NULL;
@ -297,9 +268,9 @@ void Modules::define_module(jobject module, jstring version,
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object");
}
Handle module_handle(THREAD, JNIHandles::resolve(module));
if (!java_lang_reflect_Module::is_subclass(module_handle->klass())) {
if (!java_lang_reflect_Module::is_instance(module_handle())) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"module is not a subclass of java.lang.reflect.Module");
"module is not an instance of type java.lang.reflect.Module");
}
char* module_name = get_module_name(module_handle(), CHECK);
@ -470,8 +441,8 @@ void Modules::define_module(jobject module, jstring version,
// used, prepend <java.home>/modules/modules_name, if it exists, to the system boot class path.
if (loader == NULL &&
!Universe::is_module_initialized() &&
!ClassLoader::has_jimage()) {
add_to_exploded_build_list(module_name, CHECK);
!ClassLoader::has_jrt_entry()) {
ClassLoader::add_to_exploded_build_list(module_symbol, CHECK);
}
}
@ -482,9 +453,9 @@ void Modules::set_bootloader_unnamed_module(jobject module, TRAPS) {
THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Null module object");
}
Handle module_handle(THREAD, JNIHandles::resolve(module));
if (!java_lang_reflect_Module::is_subclass(module_handle->klass())) {
if (!java_lang_reflect_Module::is_instance(module_handle())) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
"module is not a subclass of java.lang.reflect.Module");
"module is not an instance of type java.lang.reflect.Module");
}
// Ensure that this is an unnamed module
@ -758,7 +729,7 @@ jobject Modules::get_module(jclass clazz, TRAPS) {
oop module = java_lang_Class::module(mirror);
assert(module != NULL, "java.lang.Class module field not set");
assert(java_lang_reflect_Module::is_subclass(module->klass()), "Module is not a java.lang.reflect.Module");
assert(java_lang_reflect_Module::is_instance(module), "module is not an instance of type java.lang.reflect.Module");
if (log_is_enabled(Debug, modules)) {
ResourceMark rm(THREAD);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
#include "runtime/os.hpp"
// During dumping time, when processing class paths, we build up the dump-time
// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
// classpath. The JAR files that exist are stored in the list ClassLoader::_first_append_entry.
// However, we need to store other "misc" information for run-time checking, such as
//
// + The values of Arguments::get_sysclasspath() used during dumping.

View File

@ -70,7 +70,6 @@
#include "services/threadService.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/macros.hpp"
#include "utilities/stringUtils.hpp"
#include "utilities/ticks.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
@ -139,24 +138,6 @@ ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
}
// ----------------------------------------------------------------------------
// debugging
#ifdef ASSERT
// return true if class_name contains no '.' (internal format is '/')
bool SystemDictionary::is_internal_format(Symbol* class_name) {
if (class_name != NULL) {
ResourceMark rm;
char* name = class_name->as_C_string();
return strchr(name, '.') == NULL;
} else {
return true;
}
}
#endif
// ----------------------------------------------------------------------------
// Parallel class loading check
@ -335,6 +316,10 @@ Klass* SystemDictionary::resolve_array_class_or_null(Symbol* class_name,
// Must be called, even if superclass is null, since this is
// where the placeholder entry is created which claims this
// thread is loading this class/classloader.
// Be careful when modifying this code: once you have run
// placeholders()->find_and_add(PlaceholderTable::LOAD_SUPER),
// you need to find_and_remove it before returning.
// So be careful not to exit with a CHECK_ macro between these calls.
Klass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
Symbol* class_name,
Handle class_loader,
@ -399,6 +384,7 @@ Klass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
}
}
if (!throw_circularity_error) {
// Be careful not to exit resolve_super
PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, loader_data, PlaceholderTable::LOAD_SUPER, class_name, THREAD);
}
}
@ -655,6 +641,24 @@ static void post_class_load_event(const Ticks& start_time,
#endif // INCLUDE_TRACE
}
// utility function for class define event
static void class_define_event(instanceKlassHandle k) {
#if INCLUDE_TRACE
EventClassDefine event(UNTIMED);
if (event.should_commit()) {
event.set_definedClass(k());
oop defining_class_loader = k->class_loader();
event.set_definingClassLoader(defining_class_loader != NULL ?
defining_class_loader->klass() : (Klass*)NULL);
event.commit();
}
#endif // INCLUDE_TRACE
}
// Be careful when modifying this code: once you have run
// placeholders()->find_and_add(PlaceholderTable::LOAD_INSTANCE),
// you need to find_and_remove it before returning.
// So be careful not to exit with a CHECK_ macro between these calls.
Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
Handle class_loader,
Handle protection_domain,
@ -1016,8 +1020,9 @@ Klass* SystemDictionary::find_instance_or_array_klass(Symbol* class_name,
}
// Note: this method is much like resolve_from_stream, but
// updates no supplemental data structures.
// TODO consolidate the two methods with a helper routine?
// does not publish the classes via the SystemDictionary.
// Handles Unsafe_DefineAnonymousClass and RedefineClasses;
// redefined classes do not add to the class hierarchy.
Klass* SystemDictionary::parse_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
@ -1054,8 +1059,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
protection_domain,
host_klass,
cp_patches,
NULL, // parsed_name
THREAD);
CHECK_NULL);
if (host_klass != NULL && k.not_null()) {
// If it's anonymous, initialize it now, since nobody else will.
@ -1126,8 +1130,6 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
// already be present in the SystemDictionary, otherwise we would not
// throw potential ClassFormatErrors.
//
// Note: "parsed_name" is updated.
TempNewSymbol parsed_name = NULL;
instanceKlassHandle k;
@ -1139,9 +1141,7 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
CHECK_NULL);
#endif
if (k.not_null()) {
parsed_name = k->name();
} else {
if (k.is_null()) {
if (st->buffer() == NULL) {
return NULL;
}
@ -1151,64 +1151,28 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
protection_domain,
NULL, // host_klass
NULL, // cp_patches
&parsed_name,
THREAD);
CHECK_NULL);
}
const char* pkg = "java/";
if (!HAS_PENDING_EXCEPTION &&
!class_loader.is_null() &&
!SystemDictionary::is_platform_class_loader(class_loader) &&
parsed_name != NULL &&
!strncmp((const char*)parsed_name->bytes(), pkg, strlen(pkg))) {
// It is illegal to define classes in the "java." package from
// JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
ResourceMark rm(THREAD);
TempNewSymbol pkg_name = InstanceKlass::package_from_name(parsed_name, CHECK_NULL);
assert(pkg_name != NULL, "Error in parsing package name starting with 'java/'");
char* name = pkg_name->as_C_string();
StringUtils::replace_no_expand(name, "/", ".");
const char* msg_text = "Prohibited package name: ";
size_t len = strlen(msg_text) + strlen(name) + 1;
char* message = NEW_RESOURCE_ARRAY(char, len);
jio_snprintf(message, len, "%s%s", msg_text, name);
Exceptions::_throw_msg(THREAD_AND_LOCATION,
vmSymbols::java_lang_SecurityException(), message);
}
assert(k.not_null(), "no klass created");
Symbol* h_name = k->name();
assert(class_name == NULL || class_name == h_name, "name mismatch");
if (!HAS_PENDING_EXCEPTION) {
assert(parsed_name != NULL, "Sanity");
assert(class_name == NULL || class_name == parsed_name, "name mismatch");
// Verification prevents us from creating names with dots in them, this
// asserts that that's the case.
assert(is_internal_format(parsed_name),
"external class name format used internally");
// Add class just loaded
// If a class loader supports parallel classloading handle parallel define requests
// find_or_define_instance_class may return a different InstanceKlass
if (is_parallelCapable(class_loader)) {
k = find_or_define_instance_class(class_name, class_loader, k, THREAD);
} else {
define_instance_class(k, THREAD);
}
// Add class just loaded
// If a class loader supports parallel classloading handle parallel define requests
// find_or_define_instance_class may return a different InstanceKlass
if (is_parallelCapable(class_loader)) {
k = find_or_define_instance_class(h_name, class_loader, k, CHECK_NULL);
} else {
define_instance_class(k, CHECK_NULL);
}
// Make sure we have an entry in the SystemDictionary on success
debug_only( {
if (!HAS_PENDING_EXCEPTION) {
assert(parsed_name != NULL, "parsed_name is still null?");
Symbol* h_name = k->name();
ClassLoaderData *defining_loader_data = k->class_loader_data();
MutexLocker mu(SystemDictionary_lock, THREAD);
MutexLocker mu(SystemDictionary_lock, THREAD);
Klass* check = find_class(parsed_name, loader_data);
assert(check == k(), "should be present in the dictionary");
Klass* check2 = find_class(h_name, defining_loader_data);
assert(check == check2, "name inconsistency in SystemDictionary");
}
Klass* check = find_class(h_name, k->class_loader_data());
assert(check == k(), "should be present in the dictionary");
} );
return k();
@ -1246,12 +1210,16 @@ Klass* SystemDictionary::find_shared_class(Symbol* class_name) {
instanceKlassHandle SystemDictionary::load_shared_class(
Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle ik (THREAD, find_shared_class(class_name));
// Make sure we only return the boot class for the NULL classloader.
if (ik.not_null() &&
ik->is_shared_boot_class() && class_loader.is_null()) {
Handle protection_domain;
return load_shared_class(ik, class_loader, protection_domain, THREAD);
// Don't load a shared class when JvmtiExport::should_post_class_file_load_hook()
// is enabled, since posting CFLH is not supported when loading a shared class.
if (!JvmtiExport::should_post_class_file_load_hook()) {
instanceKlassHandle ik (THREAD, find_shared_class(class_name));
// Make sure we only return the boot class for the NULL classloader.
if (ik.not_null() &&
ik->is_shared_boot_class() && class_loader.is_null()) {
Handle protection_domain;
return load_shared_class(ik, class_loader, protection_domain, THREAD);
}
}
return instanceKlassHandle();
}
@ -1334,8 +1302,14 @@ bool SystemDictionary::is_shared_class_visible(Symbol* class_name,
instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
Handle class_loader,
Handle protection_domain, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
if (JvmtiExport::should_post_class_file_load_hook()) {
// Don't load a shared class when JvmtiExport::should_post_class_file_load_hook()
// is enabled, since posting CFLH is not supported when loading a shared class.
return nh;
}
if (ik.not_null()) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
Symbol* class_name = ik->name();
bool visible = is_shared_class_visible(
@ -1400,6 +1374,8 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, true);
// The prohibited package check assumes that all classes loaded from the archive
// call restore_unshareable_info, which calls ik->set_package()
ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
}
@ -1665,9 +1641,8 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
JvmtiExport::post_class_load((JavaThread *) THREAD, k());
}
TRACE_KLASS_DEFINITION(k, THREAD);
class_define_event(k);
}
// Support parallel classloading
@ -1686,6 +1661,10 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
// findClass(), i.e. FindLoadedClass/DefineClassIfAbsent or they
// potentially waste time reading and parsing the bytestream.
// Note: VM callers should ensure consistency of k/class_name,class_loader
// Be careful when modifying this code: once you have run
// placeholders()->find_and_add(PlaceholderTable::DEFINE_CLASS),
// you need to find_and_remove it before returning.
// So be careful not to exit with a CHECK_ macro between these calls.
instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* class_name, Handle class_loader, instanceKlassHandle k, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@ -2125,7 +2104,7 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
// Create the ModuleEntry for java.base. This call needs to be done here,
// after vmSymbols::initialize() is called but before any classes are pre-loaded.
ClassLoader::create_javabase();
ClassLoader::classLoader_init2(CHECK);
// Preload commonly used klasses
WKID scan = FIRST_WKID;

View File

@ -281,6 +281,7 @@ public:
// Parse new stream. This won't update the system dictionary or
// class hierarchy, simply parse the stream. Used by JVMTI RedefineClasses.
// Also used by Unsafe_DefineAnonymousClass
static Klass* parse_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
@ -413,10 +414,6 @@ public:
// Verification
static void verify();
#ifdef ASSERT
static bool is_internal_format(Symbol* class_name);
#endif
// Initialization
static void initialize(TRAPS);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,23 +25,8 @@
#ifndef SHARE_VM_CODE_NATIVEINST_HPP
#define SHARE_VM_CODE_NATIVEINST_HPP
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
#endif
#include "utilities/macros.hpp"
#include CPU_HEADER(nativeInst)
#endif // SHARE_VM_CODE_NATIVEINST_HPP

View File

@ -51,21 +51,6 @@
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#include "logging/log.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

View File

@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
class nmethod;
class CompiledMethod;
@ -423,24 +424,7 @@ class relocInfo VALUE_OBJ_CLASS_SPEC {
static void remove_reloc_info_for_address(RelocIterator *itr, address pc, relocType old_type);
// Machine dependent stuff
#ifdef TARGET_ARCH_x86
# include "relocInfo_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "relocInfo_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "relocInfo_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "relocInfo_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "relocInfo_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "relocInfo_aarch64.hpp"
#endif
#include CPU_HEADER(relocInfo)
protected:
// Derived constant, based on format_width which is PD:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "asm/register.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER2
#include "opto/adlcVMDeps.hpp"
@ -139,25 +140,7 @@ public:
static void set_regName();
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "vmreg_aarch64.hpp"
#endif
#include CPU_HEADER(vmreg)
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,23 +27,6 @@
#include "asm/register.hpp"
#include "code/vmreg.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "vmreg_aarch64.inline.hpp"
#endif
#include CPU_HEADER_INLINE(vmreg)
#endif // SHARE_VM_CODE_VMREG_INLINE_HPP

View File

@ -35,24 +35,7 @@
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef TARGET_ARCH_x86
# include "depChecker_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "depChecker_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "depChecker_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "depChecker_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "depChecker_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "depChecker_aarch64.hpp"
#endif
#include CPU_HEADER(depChecker)
#ifdef SHARK
#include "shark/sharkEntry.hpp"
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "asm/codeBuffer.hpp"
#include "runtime/globals.hpp"
#include "utilities/macros.hpp"
class decode_env;
@@ -63,25 +64,7 @@ class Disassembler {
static bool load_library();
// Machine dependent stuff
#ifdef TARGET_ARCH_x86
# include "disassembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "disassembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "disassembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "disassembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "disassembler_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "disassembler_aarch64.hpp"
#endif
#include CPU_HEADER(disassembler)
public:
static bool can_decode() {

View File

@@ -122,7 +122,7 @@ DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
// Determines how many mutator threads can process the buffers in parallel.
uint DirtyCardQueueSet::num_par_ids() {
return (uint)os::processor_count();
return (uint)os::initial_active_processor_count();
}
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,

View File

@@ -4524,7 +4524,8 @@ void G1CollectedHeap::record_obj_copy_mem_stats() {
void G1CollectedHeap::free_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par,
bool skip_remset,
bool skip_hot_card_cache,
bool locked) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
@@ -4539,20 +4540,20 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
// Clear the card counts for this region.
// Note: we only need to do this if the region is not young
// (since we don't refine cards in young regions).
if (!hr->is_young()) {
if (!skip_hot_card_cache && !hr->is_young()) {
_hot_card_cache->reset_card_counts(hr);
}
hr->hr_clear(par, true /* clear_space */, locked /* locked */);
hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
free_list->add_ordered(hr);
}
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par) {
bool skip_remset) {
assert(hr->is_humongous(), "this is only for humongous regions");
assert(free_list != NULL, "pre-condition");
hr->clear_humongous();
free_region(hr, free_list, par);
free_region(hr, free_list, skip_remset);
}
void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
@@ -4600,137 +4601,280 @@ void G1CollectedHeap::scrub_rem_set() {
workers()->run_task(&g1_par_scrub_rs_task);
}
class G1FreeCollectionSetClosure : public HeapRegionClosure {
class G1FreeCollectionSetTask : public AbstractGangTask {
private:
// Closure applied to all regions in the collection set to do work that needs to
// be done serially in a single thread.
class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
private:
EvacuationInfo* _evacuation_info;
const size_t* _surviving_young_words;
// Bytes used in successfully evacuated regions before the evacuation.
size_t _before_used_bytes;
// Bytes used in unsuccessfully evacuated regions before the evacuation.
size_t _after_used_bytes;
size_t _bytes_allocated_in_old_since_last_gc;
size_t _failure_used_words;
size_t _failure_waste_words;
FreeRegionList _local_free_list;
public:
G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
HeapRegionClosure(),
_evacuation_info(evacuation_info),
_surviving_young_words(surviving_young_words),
_before_used_bytes(0),
_after_used_bytes(0),
_bytes_allocated_in_old_since_last_gc(0),
_failure_used_words(0),
_failure_waste_words(0),
_local_free_list("Local Region List for CSet Freeing") {
}
virtual bool doHeapRegion(HeapRegion* r) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
g1h->clear_in_cset(r);
if (r->is_young()) {
assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
"Young index %d is wrong for region %u of type %s with %u young regions",
r->young_index_in_cset(),
r->hrm_index(),
r->get_type_str(),
g1h->collection_set()->young_region_length());
size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
r->record_surv_words_in_group(words_survived);
}
if (!r->evacuation_failed()) {
assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
_before_used_bytes += r->used();
g1h->free_region(r,
&_local_free_list,
true, /* skip_remset */
true, /* skip_hot_card_cache */
true /* locked */);
} else {
r->uninstall_surv_rate_group();
r->set_young_index_in_cset(-1);
r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
if (r->is_young()) {
_bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
// The region is now considered to be old.
r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
size_t used_words = r->marked_bytes() / HeapWordSize;
_failure_used_words += used_words;
_failure_waste_words += HeapRegion::GrainWords - used_words;
g1h->old_set_add(r);
_after_used_bytes += r->used();
}
return false;
}
void complete_work() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_evacuation_info->set_regions_freed(_local_free_list.length());
_evacuation_info->increment_collectionset_used_after(_after_used_bytes);
g1h->prepend_to_freelist(&_local_free_list);
g1h->decrement_summary_bytes(_before_used_bytes);
G1Policy* policy = g1h->g1_policy();
policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
}
};
G1CollectionSet* _collection_set;
G1SerialFreeCollectionSetClosure _cl;
const size_t* _surviving_young_words;
FreeRegionList _local_free_list;
size_t _rs_lengths;
// Bytes used in successfully evacuated regions before the evacuation.
size_t _before_used_bytes;
// Bytes used in unsuccessfully evacuated regions before the evacuation.
size_t _after_used_bytes;
size_t _bytes_allocated_in_old_since_last_gc;
volatile jint _serial_work_claim;
size_t _failure_used_words;
size_t _failure_waste_words;
struct WorkItem {
uint region_idx;
bool is_young;
bool evacuation_failed;
double _young_time;
double _non_young_time;
public:
G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
HeapRegionClosure(),
_surviving_young_words(surviving_young_words),
_local_free_list("Local Region List for CSet Freeing"),
_rs_lengths(0),
_before_used_bytes(0),
_after_used_bytes(0),
_bytes_allocated_in_old_since_last_gc(0),
_failure_used_words(0),
_failure_waste_words(0),
_young_time(0.0),
_non_young_time(0.0) {
WorkItem(HeapRegion* r) {
region_idx = r->hrm_index();
is_young = r->is_young();
evacuation_failed = r->evacuation_failed();
}
};
volatile size_t _parallel_work_claim;
size_t _num_work_items;
WorkItem* _work_items;
void do_serial_work() {
// Need to grab the lock to be allowed to modify the old region list.
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_collection_set->iterate(&_cl);
}
virtual bool doHeapRegion(HeapRegion* r) {
double start_time = os::elapsedTime();
bool is_young = r->is_young();
void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion* r = g1h->region_at(region_idx);
assert(!g1h->is_on_master_free_list(r), "sanity");
_rs_lengths += r->rem_set()->occupied_locked();
Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
g1h->clear_in_cset(r);
if (is_young) {
int index = r->young_index_in_cset();
assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index];
r->record_surv_words_in_group(words_survived);
} else {
assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
if (!is_young) {
g1h->_hot_card_cache->reset_card_counts(r);
}
if (!r->evacuation_failed()) {
assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
_before_used_bytes += r->used();
g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
} else {
r->uninstall_surv_rate_group();
r->set_young_index_in_cset(-1);
r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
if (is_young) {
_bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
// The region is now considered to be old.
r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
size_t used_words = r->marked_bytes() / HeapWordSize;
_failure_used_words += used_words;
_failure_waste_words += HeapRegion::GrainWords - used_words;
g1h->old_set_add(r);
_after_used_bytes += r->used();
if (!evacuation_failed) {
r->rem_set()->clear_locked();
}
if (is_young) {
_young_time += os::elapsedTime() - start_time;
} else {
_non_young_time += os::elapsedTime() - start_time;
}
return false;
}
FreeRegionList* local_free_list() { return &_local_free_list; }
size_t rs_lengths() const { return _rs_lengths; }
size_t before_used_bytes() const { return _before_used_bytes; }
size_t after_used_bytes() const { return _after_used_bytes; }
class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
private:
size_t _cur_idx;
WorkItem* _work_items;
public:
G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
virtual bool doHeapRegion(HeapRegion* r) {
_work_items[_cur_idx++] = WorkItem(r);
return false;
}
};
size_t failure_used_words() const { return _failure_used_words; }
size_t failure_waste_words() const { return _failure_waste_words; }
void prepare_work() {
G1PrepareFreeCollectionSetClosure cl(_work_items);
_collection_set->iterate(&cl);
}
double young_time() const { return _young_time; }
double non_young_time() const { return _non_young_time; }
void complete_work() {
_cl.complete_work();
G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
policy->record_max_rs_lengths(_rs_lengths);
policy->cset_regions_freed();
}
public:
G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
AbstractGangTask("G1 Free Collection Set"),
_cl(evacuation_info, surviving_young_words),
_collection_set(collection_set),
_surviving_young_words(surviving_young_words),
_serial_work_claim(0),
_rs_lengths(0),
_parallel_work_claim(0),
_num_work_items(collection_set->region_length()),
_work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
prepare_work();
}
~G1FreeCollectionSetTask() {
complete_work();
FREE_C_HEAP_ARRAY(WorkItem, _work_items);
}
// Chunk size for work distribution. The chosen value has been determined experimentally
// to be a good tradeoff between overhead and achievable parallelism.
static uint chunk_size() { return 32; }
virtual void work(uint worker_id) {
G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
// Claim serial work.
if (_serial_work_claim == 0) {
jint value = Atomic::add(1, &_serial_work_claim) - 1;
if (value == 0) {
double serial_time = os::elapsedTime();
do_serial_work();
timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
}
}
// Start parallel work.
double young_time = 0.0;
bool has_young_time = false;
double non_young_time = 0.0;
bool has_non_young_time = false;
while (true) {
size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
size_t cur = end - chunk_size();
if (cur >= _num_work_items) {
break;
}
double start_time = os::elapsedTime();
end = MIN2(end, _num_work_items);
for (; cur < end; cur++) {
bool is_young = _work_items[cur].is_young;
do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
double end_time = os::elapsedTime();
double time_taken = end_time - start_time;
if (is_young) {
young_time += time_taken;
has_young_time = true;
} else {
non_young_time += time_taken;
has_non_young_time = true;
}
start_time = end_time;
}
}
if (has_young_time) {
timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
}
if (has_non_young_time) {
timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
}
}
};
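Two claiming idioms carry the task above: the serial step is claimed exactly once through an atomic increment on _serial_work_claim, and the per-region work is handed out in fixed-size chunks through _parallel_work_claim. A self-contained sketch of the chunk-claiming loop, using std::atomic in place of HotSpot's Atomic class (all names illustrative):

#include <algorithm>
#include <atomic>
#include <cstddef>

std::atomic<size_t> g_claim{0};  // shared between all workers
const size_t kChunk = 32;        // same overhead/parallelism tradeoff as chunk_size()

void worker_loop(size_t num_items) {
  for (;;) {
    // Claim the half-open index range [end - kChunk, end).
    size_t end = g_claim.fetch_add(kChunk) + kChunk;
    size_t cur = end - kChunk;
    if (cur >= num_items) {
      break;                          // everything has been claimed already
    }
    end = std::min(end, num_items);   // the last chunk may be partial
    for (; cur < end; cur++) {
      // process work item 'cur' (cf. do_parallel_work_for_region above)
    }
  }
}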
void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
_eden.clear();
G1FreeCollectionSetClosure cl(surviving_young_words);
collection_set_iterate(&cl);
double free_cset_start_time = os::elapsedTime();
evacuation_info.set_regions_freed(cl.local_free_list()->length());
evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
{
uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
G1Policy* policy = g1_policy();
G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
policy->record_max_rs_lengths(cl.rs_lengths());
policy->cset_regions_freed();
prepend_to_freelist(cl.local_free_list());
decrement_summary_bytes(cl.before_used_bytes());
policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
_old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
cl.name(),
num_workers,
_collection_set.region_length());
workers()->run_task(&cl, num_workers);
}
g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
collection_set->clear();
}
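As a worked example of the sizing above (numbers illustrative): a collection set of 300 regions with chunk_size() == 32 gives MAX2(300 / 32, 1U) == 9 chunks, so at most 9 workers are used even if, say, 16 are active; a 10-region collection set still yields MAX2(0, 1U) == 1 chunk and therefore a single worker.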
@@ -4825,7 +4969,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
_freed_bytes += r->used();
r->set_containing_set(NULL);
_humongous_regions_removed++;
g1h->free_humongous_region(r, _free_region_list, false);
g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
r = next;
} while (r != NULL);

View File

@@ -118,6 +118,7 @@ class G1RegionMappingChangedListener : public G1MappingChangedListener {
};
class G1CollectedHeap : public CollectedHeap {
friend class G1FreeCollectionSetTask;
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
friend class VM_G1CollectFull;
@@ -642,13 +643,15 @@ public:
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
// pre_used. If skip_remset is true, the region's RSet will not be freed
// up. If skip_hot_card_cache is true, the region's hot card cache will not
// be freed up. The assumption is that this will be done later.
// The locked parameter indicates if the caller has already taken
// care of proper synchronization. This may allow some optimizations.
void free_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par,
bool skip_remset,
bool skip_hot_card_cache = false,
bool locked = false);
// It dirties the cards that cover the block so that the post
@@ -662,11 +665,11 @@ public:
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// pre_used. If skip_remset is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list,
bool par);
bool skip_remset);
// Facility for allocating in 'archive' regions in high heap memory and
// recording the allocated ranges. These should all be called from the

View File

@@ -132,109 +132,114 @@ void G1CMBitMap::clear_range(MemRegion mr) {
heapWordToOffset(mr.end()), false);
}
G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
_base(NULL), _cm(cm)
{}
G1CMMarkStack::G1CMMarkStack() :
_reserved_space(),
_base(NULL),
_capacity(0),
_saved_index((size_t)AllBits),
_should_expand(false) {
set_empty();
}
bool G1CMMarkStack::allocate(size_t capacity) {
// allocate a stack of the requisite depth
ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
bool G1CMMarkStack::resize(size_t new_capacity) {
assert(is_empty(), "Only resize when stack is empty.");
assert(new_capacity <= MarkStackSizeMax,
"Trying to resize stack to " SIZE_FORMAT " elements when the maximum is " SIZE_FORMAT, new_capacity, MarkStackSizeMax);
size_t reservation_size = ReservedSpace::allocation_align_size_up(new_capacity * sizeof(oop));
ReservedSpace rs(reservation_size);
if (!rs.is_reserved()) {
log_warning(gc)("ConcurrentMark MarkStack allocation failure");
log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " elements and size " SIZE_FORMAT "B.", new_capacity, reservation_size);
return false;
}
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
if (!_virtual_space.initialize(rs, rs.size())) {
log_warning(gc)("ConcurrentMark MarkStack backing store failure");
// Release the virtual memory reserved for the marking stack
VirtualSpace vs;
if (!vs.initialize(rs, rs.size())) {
rs.release();
log_warning(gc)("Failed to commit memory for new overflow mark stack of size " SIZE_FORMAT "B.", rs.size());
return false;
}
assert(_virtual_space.committed_size() == rs.size(),
"Didn't reserve backing store for all of G1ConcurrentMark stack?");
_base = (oop*) _virtual_space.low();
setEmpty();
_capacity = (jint) capacity;
_saved_index = -1;
assert(vs.committed_size() == rs.size(), "Failed to commit all of the mark stack.");
// Release old mapping.
_reserved_space.release();
// Save new mapping for future unmapping.
_reserved_space = rs;
MemTracker::record_virtual_memory_type((address)_reserved_space.base(), mtGC);
_base = (oop*) vs.low();
_capacity = new_capacity;
set_empty();
_should_expand = false;
return true;
}
void G1CMMarkStack::expand() {
// Called during remark if we've overflown the marking stack during marking.
assert(isEmpty(), "stack should have been emptied while handling overflow");
assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
// Clear expansion flag
_should_expand = false;
if (_capacity == (jint) MarkStackSizeMax) {
log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
return;
}
// Double capacity if possible
jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
// Do not give up existing stack until we have managed to
// get the double capacity that we desired.
ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
sizeof(oop)));
if (rs.is_reserved()) {
// Release the backing store associated with old stack
_virtual_space.release();
// Reinitialize virtual space for new stack
if (!_virtual_space.initialize(rs, rs.size())) {
fatal("Not enough swap for expanded marking stack capacity");
}
_base = (oop*)(_virtual_space.low());
_index = 0;
_capacity = new_capacity;
} else {
// Failed to double capacity, continue;
log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
_capacity / K, new_capacity / K);
}
bool G1CMMarkStack::allocate(size_t capacity) {
return resize(capacity);
}
void G1CMMarkStack::set_should_expand() {
// If we're resetting the marking state because of a
// marking stack overflow, record that we should, if
// possible, expand the stack.
_should_expand = _cm->has_overflown();
void G1CMMarkStack::expand() {
// Clear expansion flag
_should_expand = false;
if (_capacity == MarkStackSizeMax) {
log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " elements.", _capacity);
return;
}
size_t old_capacity = _capacity;
// Double capacity if possible
size_t new_capacity = MIN2(old_capacity * 2, MarkStackSizeMax);
if (resize(new_capacity)) {
log_debug(gc)("Expanded marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
old_capacity, new_capacity);
} else {
log_warning(gc)("Failed to expand marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements",
old_capacity, new_capacity);
}
}
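In isolation, the expansion policy above doubles the capacity but never goes beyond MarkStackSizeMax, and resize() only swaps in the new mapping after the fresh reservation has been committed, so a failed expansion keeps the old stack usable. A minimal sketch of the capacity step (element counts only, names illustrative):

#include <algorithm>
#include <cstddef>

// Returns the capacity expand() would request next, or the current one
// when the stack is already at its configured maximum.
size_t next_mark_stack_capacity(size_t current, size_t maximum) {
  if (current == maximum) {
    return current;  // expand() just logs and keeps the existing stack
  }
  return std::min(current * 2, maximum);
}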
G1CMMarkStack::~G1CMMarkStack() {
if (_base != NULL) {
_base = NULL;
_virtual_space.release();
_reserved_space.release();
}
}
void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
void G1CMMarkStack::par_push_arr(oop* buffer, size_t n) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
jint start = _index;
jint next_index = start + n;
size_t start = _index;
size_t next_index = start + n;
if (next_index > _capacity) {
_overflow = true;
return;
}
// Otherwise.
_index = next_index;
for (int i = 0; i < n; i++) {
int ind = start + i;
for (size_t i = 0; i < n; i++) {
size_t ind = start + i;
assert(ind < _capacity, "By overflow test above.");
_base[ind] = ptr_arr[i];
_base[ind] = buffer[i];
}
}
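A minimal sketch of the bounds check par_push_arr performs under its lock, with a template parameter standing in for oop (hypothetical helper, not HotSpot code):

#include <cstddef>

// Pushes n elements onto a fixed-capacity stack. On overflow nothing is
// copied; the flag is set so the caller can expand at the next safepoint.
template <typename T>
bool bounded_push(T* base, size_t capacity, size_t& index, bool& overflow,
                  const T* buffer, size_t n) {
  size_t next_index = index + n;
  if (next_index > capacity) {
    overflow = true;
    return false;
  }
  for (size_t i = 0; i < n; i++) {
    base[index + i] = buffer[i];
  }
  index = next_index;
  return true;
}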
bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
bool G1CMMarkStack::par_pop_arr(oop* buffer, size_t max, size_t* n) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
jint index = _index;
size_t index = _index;
if (index == 0) {
*n = 0;
return false;
} else {
int k = MIN2(max, index);
jint new_ind = index - k;
for (int j = 0; j < k; j++) {
ptr_arr[j] = _base[new_ind + j];
size_t k = MIN2(max, index);
size_t new_ind = index - k;
for (size_t j = 0; j < k; j++) {
buffer[j] = _base[new_ind + j];
}
_index = new_ind;
*n = k;
@@ -243,20 +248,14 @@ bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
}
void G1CMMarkStack::note_start_of_gc() {
assert(_saved_index == -1,
"note_start_of_gc()/end_of_gc() bracketed incorrectly");
assert(_saved_index == (size_t)AllBits, "note_start_of_gc()/end_of_gc() calls bracketed incorrectly");
_saved_index = _index;
}
void G1CMMarkStack::note_end_of_gc() {
// This is intentionally a guarantee, instead of an assert. If we
// accidentally add something to the mark stack during GC, it
// will be a correctness issue so it's better if we crash. we'll
// only check this once per GC anyway, so it won't be a performance
// issue in any way.
guarantee(_saved_index == _index,
"saved index: %d index: %d", _saved_index, _index);
_saved_index = -1;
guarantee(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);
_saved_index = (size_t)AllBits;
}
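The bracketing above uses (size_t)AllBits (all bits set) as the "no index saved" sentinel now that _saved_index is unsigned. A sketch of the contract (illustrative types, not HotSpot code):

#include <cassert>
#include <cstddef>

const size_t kNoSavedIndex = ~(size_t)0;  // analogous to (size_t)AllBits

struct MarkStackGuard {
  size_t index = 0;
  size_t saved_index = kNoSavedIndex;

  void note_start_of_gc() {
    assert(saved_index == kNoSavedIndex && "calls must be bracketed");
    saved_index = index;
  }
  void note_end_of_gc() {
    // A mark stack push during GC is a correctness bug, hence a hard check.
    assert(saved_index == index && "stack modified during GC");
    saved_index = kNoSavedIndex;
  }
};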
G1CMRootRegions::G1CMRootRegions() :
@@ -351,7 +350,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_prevMarkBitMap(&_markBitMap1),
_nextMarkBitMap(&_markBitMap2),
_markStack(this),
_global_mark_stack(),
// _finger set in set_non_marking_state
_max_worker_id(ParallelGCThreads),
@@ -417,11 +416,10 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
double overall_cm_overhead =
(double) MaxGCPauseMillis * marking_overhead /
(double) GCPauseIntervalMillis;
double cpu_ratio = 1.0 / (double) os::processor_count();
double cpu_ratio = 1.0 / os::initial_active_processor_count();
double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
double marking_task_overhead =
overall_cm_overhead / marking_thread_num *
(double) os::processor_count();
overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
double sleep_factor =
(1.0 - marking_task_overhead) / marking_task_overhead;
@@ -485,8 +483,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
}
}
if (!_markStack.allocate(MarkStackSize)) {
log_warning(gc)("Failed to allocate CM marking stack");
if (!_global_mark_stack.allocate(MarkStackSize)) {
vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
return;
}
@@ -541,8 +539,8 @@ void G1ConcurrentMark::reset() {
void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
_markStack.set_should_expand();
_markStack.setEmpty(); // Also clears the _markStack overflow flag
_global_mark_stack.set_should_expand(has_overflown());
_global_mark_stack.set_empty(); // Also clears the overflow stack's overflow flag
if (clear_overflow) {
clear_has_overflown();
} else {
@@ -1076,7 +1074,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
weakRefsWork(clear_all_soft_refs);
if (has_overflown()) {
// Oops. We overflowed. Restart concurrent marking.
// We overflowed. Restart concurrent marking.
_restart_for_overflow = true;
// Verify the heap w.r.t. the previous marking bitmap.
@@ -1109,8 +1107,8 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
}
// Expand the marking stack, if we have to and if we can.
if (_markStack.should_expand()) {
_markStack.expand();
if (_global_mark_stack.should_expand()) {
_global_mark_stack.expand();
}
// Statistics
@@ -1160,10 +1158,10 @@ public:
hr->set_containing_set(NULL);
if (hr->is_humongous()) {
_humongous_regions_removed++;
_g1->free_humongous_region(hr, _local_cleanup_list, true);
_g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
} else {
_old_regions_removed++;
_g1->free_region(hr, _local_cleanup_list, true);
_g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
}
} else {
hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
@@ -1637,7 +1635,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Set the soft reference policy
rp->setup_policy(clear_all_soft_refs);
assert(_markStack.isEmpty(), "mark stack should be empty");
assert(_global_mark_stack.is_empty(), "mark stack should be empty");
// Instances of the 'Keep Alive' and 'Complete GC' closures used
// in serial reference processing. Note these closures are also
@@ -1692,10 +1690,10 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// oop closures will set the has_overflown flag if we overflow the
// global marking stack.
assert(_markStack.overflow() || _markStack.isEmpty(),
assert(_global_mark_stack.overflow() || _global_mark_stack.is_empty(),
"mark stack should be empty (unless it overflowed)");
if (_markStack.overflow()) {
if (_global_mark_stack.overflow()) {
// This should have been done already when we tried to push an
// entry on to the global mark stack. But let's do it again.
set_has_overflown();
@@ -1714,7 +1712,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
return;
}
assert(_markStack.isEmpty(), "Marking should have completed");
assert(_global_mark_stack.is_empty(), "Marking should have completed");
// Unload Klasses, String, Symbols, Code Cache, etc.
if (ClassUnloadingWithConcurrentMark) {
@@ -1967,7 +1965,7 @@ void G1ConcurrentMark::verify_no_cset_oops() {
}
// Verify entries on the global mark stack
_markStack.iterate(VerifyNoCSetOops("Stack"));
_global_mark_stack.iterate(VerifyNoCSetOops("Stack"));
// Verify entries on the task queues
for (uint i = 0; i < _max_worker_id; ++i) {
@@ -2366,13 +2364,13 @@ void G1CMTask::get_entries_from_global_stack() {
// local array where we'll store the entries that will be popped
// from the global stack.
oop buffer[global_stack_transfer_size];
int n;
size_t n;
_cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
assert(n <= global_stack_transfer_size,
"we should not pop more than the given limit");
if (n > 0) {
// yes, we did actually pop at least one entry
for (int i = 0; i < n; ++i) {
for (size_t i = 0; i < n; ++i) {
bool success = _task_queue->push(buffer[i]);
// We only call this when the local queue is empty or under a
// given target limit. So, we do not expect this push to fail.

View File

@@ -145,53 +145,56 @@ class G1CMBitMap : public G1CMBitMapRO {
void clear_range(MemRegion mr);
};
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
// Represents the overflow mark stack used by concurrent marking.
//
// Stores oops in a huge buffer in virtual memory that is always fully committed.
// Resizing may only happen during a STW pause when the stack is empty.
class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
VirtualSpace _virtual_space; // Underlying backing store for actual stack
G1ConcurrentMark* _cm;
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
jint _saved_index; // value of _index saved at start of GC
ReservedSpace _reserved_space; // Space currently reserved for the mark stack.
oop* _base; // Bottom address of allocated memory area.
size_t _capacity; // Maximum number of elements.
size_t _index; // One more than last occupied index.
size_t _saved_index; // Value of _index saved at start of GC to detect mark stack modifications during that time.
bool _overflow;
bool _should_expand;
// Resizes the mark stack to the given new capacity. Releases any previous
// memory if successful.
bool resize(size_t new_capacity);
bool stack_modified() const { return _index != _saved_index; }
public:
G1CMMarkStack(G1ConcurrentMark* cm);
G1CMMarkStack();
~G1CMMarkStack();
bool allocate(size_t capacity);
// Pushes the first "n" elements of "ptr_arr" on the stack.
// Locking impl: concurrency is allowed only with
// "par_push_arr" and/or "par_pop_arr" operations, which use the same
// locking strategy.
void par_push_arr(oop* ptr_arr, int n);
// Pushes the first "n" elements of the given buffer on the stack.
void par_push_arr(oop* buffer, size_t n);
// If returns false, the array was empty. Otherwise, removes up to "max"
// elements from the stack, and transfers them to "ptr_arr" in an
// unspecified order. The actual number transferred is given in "n" ("n
// == 0" is deliberately redundant with the return value.) Locking impl:
// concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
// operations, which use the same locking strategy.
bool par_pop_arr(oop* ptr_arr, int max, int* n);
// Moves up to "max" elements from the stack into the given buffer. Returns
// the number of elements moved in "n" and false if the stack was empty;
// returns true if the buffer contains at least one element.
bool par_pop_arr(oop* buffer, size_t max, size_t* n);
bool isEmpty() { return _index == 0; }
int maxElems() { return _capacity; }
bool is_empty() const { return _index == 0; }
size_t capacity() const { return _capacity; }
bool overflow() { return _overflow; }
bool overflow() const { return _overflow; }
void clear_overflow() { _overflow = false; }
bool should_expand() const { return _should_expand; }
void set_should_expand();
void set_should_expand(bool value) { _should_expand = value; }
// Expand the stack, typically in response to an overflow condition
void expand();
int size() { return _index; }
size_t size() const { return _index; }
void setEmpty() { _index = 0; clear_overflow(); }
void set_empty() { _index = 0; clear_overflow(); }
// Record the current index.
void note_start_of_gc();
@@ -308,7 +311,7 @@ protected:
G1CMRootRegions _root_regions;
// For gray objects
G1CMMarkStack _markStack; // Grey objects behind global finger
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
HeapWord* volatile _finger; // The global finger, region aligned,
// always points to the end of the
// last claimed region
@@ -478,21 +481,21 @@ public:
// The push and pop operations are used by tasks for transfers
// between task-local queues and the global mark stack, and use
// locking for concurrency safety.
bool mark_stack_push(oop* arr, int n) {
_markStack.par_push_arr(arr, n);
if (_markStack.overflow()) {
bool mark_stack_push(oop* arr, size_t n) {
_global_mark_stack.par_push_arr(arr, n);
if (_global_mark_stack.overflow()) {
set_has_overflown();
return false;
}
return true;
}
void mark_stack_pop(oop* arr, int max, int* n) {
_markStack.par_pop_arr(arr, max, n);
void mark_stack_pop(oop* arr, size_t max, size_t* n) {
_global_mark_stack.par_pop_arr(arr, max, n);
}
size_t mark_stack_size() { return _markStack.size(); }
size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
bool mark_stack_overflow() { return _markStack.overflow(); }
bool mark_stack_empty() { return _markStack.isEmpty(); }
size_t mark_stack_size() { return _global_mark_stack.size(); }
size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
bool mark_stack_overflow() { return _global_mark_stack.overflow(); }
bool mark_stack_empty() { return _global_mark_stack.is_empty(); }
G1CMRootRegions* root_regions() { return &_root_regions; }
@@ -598,12 +601,12 @@ public:
// Notify data structures that a GC has started.
void note_start_of_gc() {
_markStack.note_start_of_gc();
_global_mark_stack.note_start_of_gc();
}
// Notify data structures that a GC is finished.
void note_end_of_gc() {
_markStack.note_end_of_gc();
_global_mark_stack.note_end_of_gc();
}
// Verify that there are no CSet oops on the stacks (taskqueues /
@@ -660,17 +663,17 @@ private:
class G1CMTask : public TerminatorTerminator {
private:
enum PrivateConstants {
// the regular clock call is called once the scanned words reaches
// The regular clock call is called once the number of scanned words reaches
// this limit
words_scanned_period = 12*1024,
// the regular clock call is called once the number of visited
// The regular clock call is called once the number of visited
// references reaches this limit
refs_reached_period = 384,
// initial value for the hash seed, used in the work stealing code
// Initial value for the hash seed, used in the work stealing code
init_hash_seed = 17,
// how many entries will be transferred between global stack and
// local queues
global_stack_transfer_size = 16
// How many entries will be transferred between global stack and
// local queues at once.
global_stack_transfer_size = 1024
};
uint _worker_id;

View File

@@ -91,8 +91,9 @@ inline bool G1CMBitMap::parMark(HeapWord* addr) {
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) {
assert(_saved_index == _index, "saved index: %d index: %d", _saved_index, _index);
for (int i = 0; i < _index; ++i) {
assert_at_safepoint(true);
assert(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);
for (size_t i = 0; i < _index; ++i) {
fn(_base[i]);
}
}

View File

@@ -97,47 +97,64 @@ void G1DefaultPolicy::note_gc_start() {
phase_times()->note_gc_start();
}
bool G1DefaultPolicy::predict_will_fit(uint young_length,
double base_time_ms,
uint base_free_regions,
double target_pause_time_ms) const {
if (young_length >= base_free_regions) {
// end condition 1: not enough space for the young regions
return false;
class G1YoungLengthPredictor VALUE_OBJ_CLASS_SPEC {
const bool _during_cm;
const double _base_time_ms;
const double _base_free_regions;
const double _target_pause_time_ms;
const G1DefaultPolicy* const _policy;
public:
G1YoungLengthPredictor(bool during_cm,
double base_time_ms,
double base_free_regions,
double target_pause_time_ms,
const G1DefaultPolicy* policy) :
_during_cm(during_cm),
_base_time_ms(base_time_ms),
_base_free_regions(base_free_regions),
_target_pause_time_ms(target_pause_time_ms),
_policy(policy) {}
bool will_fit(uint young_length) const {
if (young_length >= _base_free_regions) {
// end condition 1: not enough space for the young regions
return false;
}
const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1);
const size_t bytes_to_copy =
(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
const double copy_time_ms =
_policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm);
const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
if (pause_time_ms > _target_pause_time_ms) {
// end condition 2: prediction is over the target pause time
return false;
}
const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
// When copying, we will likely need more bytes free than is live in the region.
// Add some safety margin to factor in the confidence of our guess, and the
// natural expected waste.
// (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
// of the calculation: the lower the confidence, the more headroom.
// (100 + TargetPLABWastePct) represents the increase in expected bytes during
// copying due to anticipated waste in the PLABs.
const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
if (expected_bytes_to_copy > free_bytes) {
// end condition 3: out-of-space
return false;
}
// success!
return true;
}
double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
size_t bytes_to_copy =
(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
collector_state()->during_concurrent_mark());
double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
if (pause_time_ms > target_pause_time_ms) {
// end condition 2: prediction is over the target pause time
return false;
}
size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
// When copying, we will likely need more bytes free than is live in the region.
// Add some safety margin to factor in the confidence of our guess, and the
// natural expected waste.
// (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
// of the calculation: the lower the confidence, the more headroom.
// (100 + TargetPLABWastePct) represents the increase in expected bytes during
// copying due to anticipated waste in the PLABs.
double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
if (expected_bytes_to_copy > free_bytes) {
// end condition 3: out-of-space
return false;
}
// success!
return true;
}
};
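To make the safety margin concrete (a worked example assuming the JDK defaults of the time, G1ConfidencePercent = 50 and TargetPLABWastePct = 10): safety_factor = (100.0 / 50) * (100 + 10) / 100.0 = 2.0 * 1.1 = 2.2, so a young length predicted to copy N live bytes only passes end condition 3 when about 2.2 * N bytes remain free.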
void G1DefaultPolicy::record_new_heap_size(uint new_number_of_regions) {
// re-calculate the necessary reserve
@@ -279,31 +296,32 @@ G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
assert(desired_max_length > base_min_length, "invariant");
uint max_young_length = desired_max_length - base_min_length;
double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
double survivor_regions_evac_time = predict_survivor_regions_evac_time();
size_t pending_cards = _analytics->predict_pending_cards();
size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
double base_time_ms =
const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
const size_t pending_cards = _analytics->predict_pending_cards();
const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
const double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time;
uint available_free_regions = _free_regions_at_end_of_collection;
uint base_free_regions = 0;
if (available_free_regions > _reserve_regions) {
base_free_regions = available_free_regions - _reserve_regions;
}
const uint available_free_regions = _free_regions_at_end_of_collection;
const uint base_free_regions =
available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
// Here, we will make sure that the shortest young length that
// makes sense fits within the target pause time.
if (predict_will_fit(min_young_length, base_time_ms,
base_free_regions, target_pause_time_ms)) {
G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
base_time_ms,
base_free_regions,
target_pause_time_ms,
this);
if (p.will_fit(min_young_length)) {
// The shortest young length will fit into the target pause time;
// we'll now check whether the absolute maximum number of young
// regions will fit in the target pause time. If not, we'll do
// a binary search between min_young_length and max_young_length.
if (predict_will_fit(max_young_length, base_time_ms,
base_free_regions, target_pause_time_ms)) {
if (p.will_fit(max_young_length)) {
// The maximum young length will fit into the target pause time.
// We are done so set min young length to the maximum length (as
// the result is assumed to be returned in min_young_length).
@@ -328,8 +346,7 @@ G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
uint diff = (max_young_length - min_young_length) / 2;
while (diff > 0) {
uint young_length = min_young_length + diff;
if (predict_will_fit(young_length, base_time_ms,
base_free_regions, target_pause_time_ms)) {
if (p.will_fit(young_length)) {
min_young_length = young_length;
} else {
max_young_length = young_length;
@@ -344,12 +361,10 @@ G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
assert(min_young_length < max_young_length,
"otherwise we should have discovered that max_young_length "
"fits into the pause target and not done the binary search");
assert(predict_will_fit(min_young_length, base_time_ms,
base_free_regions, target_pause_time_ms),
assert(p.will_fit(min_young_length),
"min_young_length, the result of the binary search, should "
"fit into the pause target");
assert(!predict_will_fit(min_young_length + 1, base_time_ms,
base_free_regions, target_pause_time_ms),
assert(!p.will_fit(min_young_length + 1),
"min_young_length, the result of the binary search, should be "
"optimal, so no larger length should fit into the pause target");
}
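The loop above is the classic binary search for the largest value satisfying a monotone predicate, which is exactly the invariant the trailing asserts check. A self-contained sketch (illustrative, not JDK code):

#include <cstdint>

// Precondition: will_fit(lo) is true, will_fit(hi) is false, and the
// predicate flips from true to false exactly once on [lo, hi].
// Returns the largest value in [lo, hi) for which will_fit holds.
template <typename Pred>
uint32_t largest_fitting(uint32_t lo, uint32_t hi, Pred will_fit) {
  uint32_t diff = (hi - lo) / 2;
  while (diff > 0) {
    uint32_t mid = lo + diff;
    if (will_fit(mid)) {
      lo = mid;   // mid fits: the answer is at least mid
    } else {
      hi = mid;   // mid does not fit: the answer is below mid
    }
    diff = (hi - lo) / 2;
  }
  return lo;
}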
@@ -501,13 +516,12 @@ double G1DefaultPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const
double G1DefaultPolicy::young_other_time_ms() const {
return phase_times()->young_cset_choice_time_ms() +
phase_times()->young_free_cset_time_ms();
phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
}
double G1DefaultPolicy::non_young_other_time_ms() const {
return phase_times()->non_young_cset_choice_time_ms() +
phase_times()->non_young_free_cset_time_ms();
phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
}
double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
@@ -515,7 +529,7 @@ double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
}
double G1DefaultPolicy::constant_other_time_ms(double pause_time_ms) const {
return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
}
CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {

View File

@@ -91,6 +91,9 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
_gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
_gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");
_gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
}
@@ -278,10 +281,11 @@ void G1GCPhaseTimes::print() {
info_line_and_account("Clear Card Table", _cur_clear_ct_time_ms);
info_line_and_account("Expand Heap After Collection", _cur_expand_heap_time_ms);
double free_cset_time = _recorded_young_free_cset_time_ms + _recorded_non_young_free_cset_time_ms;
info_line_and_account("Free Collection Set", free_cset_time);
debug_line("Young Free Collection Set", _recorded_young_free_cset_time_ms);
debug_line("Non-Young Free Collection Set", _recorded_non_young_free_cset_time_ms);
info_line_and_account("Free Collection Set", _recorded_total_free_cset_time_ms);
debug_line("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
debug_phase(_gc_par_phases[YoungFreeCSet]);
debug_phase(_gc_par_phases[NonYoungFreeCSet]);
info_line_and_account("Merge Per-Thread State", _recorded_merge_pss_time_ms);
info_line("Other", _gc_pause_time_ms - accounted_time_ms);

View File

@@ -67,6 +67,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
StringDedupTableFixup,
RedirtyCards,
PreserveCMReferents,
YoungFreeCSet,
NonYoungFreeCSet,
GCParPhasesSentinel
};
@@ -110,8 +112,9 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _recorded_merge_pss_time_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
double _recorded_total_free_cset_time_ms;
double _recorded_serial_free_cset_time_ms;
double _cur_fast_reclaim_humongous_time_ms;
double _cur_fast_reclaim_humongous_register_time_ms;
@@ -199,12 +202,12 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_root_region_scan_wait_time_ms = time_ms;
}
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
void record_total_free_cset_time_ms(double time_ms) {
_recorded_total_free_cset_time_ms = time_ms;
}
void record_non_young_free_cset_time_ms(double time_ms) {
_recorded_non_young_free_cset_time_ms = time_ms;
void record_serial_free_cset_time_ms(double time_ms) {
_recorded_serial_free_cset_time_ms = time_ms;
}
void record_fast_reclaim_humongous_stats(double time_ms, size_t total, size_t candidates) {
@@ -278,18 +281,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _recorded_young_cset_choice_time_ms;
}
double young_free_cset_time_ms() {
return _recorded_young_free_cset_time_ms;
double total_free_cset_time_ms() {
return _recorded_total_free_cset_time_ms;
}
double non_young_cset_choice_time_ms() {
return _recorded_non_young_cset_choice_time_ms;
}
double non_young_free_cset_time_ms() {
return _recorded_non_young_free_cset_time_ms;
}
double fast_reclaim_humongous_time_ms() {
return _cur_fast_reclaim_humongous_time_ms;
}

View File

@@ -340,7 +340,7 @@ void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
hr->set_containing_set(NULL);
_humongous_regions_removed++;
_g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
_g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
prepare_for_compaction(hr, end);
dummy_free_list.remove_all();
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,22 +26,8 @@
#include "gc/g1/g1PageBasedVirtualSpace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
#include "utilities/bitMap.inline.hpp"
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :

View File

@@ -45,7 +45,7 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
"A new max generation size of " SIZE_FORMAT "k will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
MaxNewSize = NewSize;
FLAG_SET_ERGO(size_t, MaxNewSize, NewSize);
}
if (FLAG_IS_CMDLINE(NewSize)) {

View File

@@ -167,7 +167,7 @@ void HeapRegion::reset_after_compaction() {
init_top_at_mark_start();
}
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
assert(_humongous_start_region == NULL,
"we should have already filtered out humongous regions");
assert(!in_collection_set(),
@@ -179,15 +179,14 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
set_free();
reset_pre_dummy_top();
if (!par) {
// If this is parallel, this will be done later.
HeapRegionRemSet* hrrs = rem_set();
if (!keep_remset) {
if (locked) {
hrrs->clear_locked();
rem_set()->clear_locked();
} else {
hrrs->clear();
rem_set()->clear();
}
}
zero_marked_bytes();
init_top_at_mark_start();

Some files were not shown because too many files have changed in this diff.