8253795: Implementation of JEP 391: macOS/AArch64 Port

8253816: Support macOS W^X
8253817: Support macOS Aarch64 ABI in Interpreter
8253818: Support macOS Aarch64 ABI for compiled wrappers
8253819: Implement os/cpu for macOS/AArch64
8253839: Update tests and JDK code for macOS/Aarch64
8254941: Implement Serviceability Agent for macOS/AArch64
8255776: Change build system for macOS/AArch64
8262903: [macos_aarch64] Thread::current() called on detached thread

Co-authored-by: Vladimir Kempik <vkempik@openjdk.org>
Co-authored-by: Bernhard Urban-Forster <burban@openjdk.org>
Co-authored-by: Ludovic Henry <luhenry@openjdk.org>
Co-authored-by: Monica Beckwith <mbeckwit@openjdk.org>
Reviewed-by: erikj, ihse, prr, cjplummer, stefank, gziemski, aph, mbeckwit, luhenry
Authored by Anton Kozlov on 2021-03-25 18:10:18 +00:00; committed by Vladimir Kempik
parent b006f22f1f
commit dbc9e4b50c
75 changed files with 2933 additions and 109 deletions

View File

@ -1,6 +1,7 @@
#!/bin/sh
#
# Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -101,6 +102,14 @@ if [ "x$OUT" = x ]; then
fi
fi
# Test and fix cpu on macos-aarch64: uname -p reports arm, but the build system expects aarch64
echo $OUT | grep arm-apple-darwin > /dev/null 2> /dev/null
if test $? = 0; then
if [ `uname -m` = arm64 ]; then
OUT=aarch64`echo $OUT | sed -e 's/[^-]*//'`
fi
fi
# Test and fix cpu on Macosx when C preprocessor is not on the path
echo $OUT | grep i386-apple-darwin > /dev/null 2> /dev/null
if test $? = 0; then

View File

@ -125,19 +125,25 @@ AC_DEFUN([FLAGS_SETUP_MACOSX_VERSION],
[
# Additional macosx handling
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# The expected format for <version> is either nn.n.n or nn.nn.nn. See
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/AvailabilityVersions.h
# MACOSX_VERSION_MIN specifies the lowest version of Macosx that the built
# binaries should be compatible with, even if compiled on a newer version
# of the OS. It currently has a hard coded value. Setting this also limits
# exposure to API changes in header files. Bumping this is likely to
# require code changes to build.
MACOSX_VERSION_MIN=10.12.0
if test "x$OPENJDK_TARGET_CPU_ARCH" = xaarch64; then
MACOSX_VERSION_MIN=11.00.00
else
MACOSX_VERSION_MIN=10.12.0
fi
MACOSX_VERSION_MIN_NODOTS=${MACOSX_VERSION_MIN//\./}
AC_SUBST(MACOSX_VERSION_MIN)
# Setting --with-macosx-version-max=<version> makes it an error to build or
# link to macosx APIs that are newer than the given OS version. The expected
# format for <version> is either nn.n.n or nn.nn.nn. See /usr/include/AvailabilityMacros.h.
# link to macosx APIs that are newer than the given OS version.
AC_ARG_WITH([macosx-version-max], [AS_HELP_STRING([--with-macosx-version-max],
[error on use of newer functionality. @<:@macosx@:>@])],
[

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -242,7 +242,7 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_AOT],
elif test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-aarch64"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, $OPENJDK_TARGET_CPU])
AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
AVAILABLE=false
fi
@ -264,11 +264,13 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_CDS],
[
JVM_FEATURES_CHECK_AVAILABILITY(cds, [
AC_MSG_CHECKING([if platform is supported by CDS])
if test "x$OPENJDK_TARGET_OS" != xaix; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, $OPENJDK_TARGET_OS])
if test "x$OPENJDK_TARGET_OS" = xaix || \
( test "x$OPENJDK_TARGET_OS" = "xmacosx" && \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ) ; then
AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
AVAILABLE=false
else
AC_MSG_RESULT([yes])
fi
])
])

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -1177,7 +1177,7 @@ define SetupNativeCompilationBody
# This only works if the openjdk_codesign identity is present on the system. Let
# silently fail otherwise.
ifneq ($(CODESIGN), )
$(CODESIGN) -s "$(MACOSX_CODESIGN_IDENTITY)" --timestamp --options runtime \
$(CODESIGN) -f -s "$(MACOSX_CODESIGN_IDENTITY)" --timestamp --options runtime \
--entitlements $$(call GetEntitlementsFile, $$@) $$@
endif
endif

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -88,6 +88,9 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLCFLAGS += -DAIX=1
else ifeq ($(call isTargetOs, macosx), true)
ADLCFLAGS += -D_ALLBSD_SOURCE=1 -D_GNU_SOURCE=1
ifeq ($(HOTSPOT_TARGET_CPU_ARCH), aarch64)
ADLCFLAGS += -DR18_RESERVED
endif
else ifeq ($(call isTargetOs, windows), true)
ifeq ($(call isTargetCpuBits, 64), true)
ADLCFLAGS += -D_WIN64=1

View File

@ -468,7 +468,8 @@ else
maybe-uninitialized class-memaccess
HARFBUZZ_DISABLED_WARNINGS_clang := unused-value incompatible-pointer-types \
tautological-constant-out-of-range-compare int-to-pointer-cast \
undef missing-field-initializers range-loop-analysis
undef missing-field-initializers range-loop-analysis \
deprecated-declarations c++11-narrowing
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244 4090 4146 4334 4819 4101 4068 4805 4138
LIBFONTMANAGER_CFLAGS += $(HARFBUZZ_CFLAGS)

View File

@ -31,7 +31,7 @@ ifeq ($(call isTargetOs, linux), true)
SA_CFLAGS := -D_FILE_OFFSET_BITS=64
else ifeq ($(call isTargetOs, macosx), true)
SA_CFLAGS := -Damd64 -D_GNU_SOURCE -mno-omit-leaf-frame-pointer \
SA_CFLAGS := -D_GNU_SOURCE -mno-omit-leaf-frame-pointer \
-mstack-alignment=16 -fPIC
LIBSA_EXTRA_SRC := $(SUPPORT_OUTPUTDIR)/gensrc/jdk.hotspot.agent
else ifeq ($(call isTargetOs, windows), true)

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -58,7 +58,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#if defined(_WIN64)
#if defined(__APPLE__) || defined(_WIN64)
#define R18_RESERVED
#define R18_RESERVED_ONLY(code) code
#define NOT_R18_RESERVED(code)

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,9 +58,14 @@ FloatRegister InterpreterRuntime::SignatureHandlerGenerator::next_fpr() {
return fnoreg;
}
int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset() {
// On macos/aarch64 the native stack is packed: int/float arguments use only
// 4 bytes on the stack. Natural alignment for each type is still in place;
// for example, double/long arguments must be 8-byte aligned.
int InterpreterRuntime::SignatureHandlerGenerator::next_stack_offset(unsigned elem_size) {
MACOS_ONLY(_stack_offset = align_up(_stack_offset, elem_size));
int ret = _stack_offset;
_stack_offset += wordSize;
_stack_offset += NOT_MACOS(wordSize) MACOS_ONLY(elem_size);
return ret;
}
@ -71,6 +77,30 @@ InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
_stack_offset = 0;
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_byte() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
Register reg = next_gpr();
if (reg != noreg) {
__ ldr(reg, src);
} else {
__ ldrb(r0, src);
__ strb(r0, Address(to(), next_stack_offset(sizeof(jbyte))));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_short() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
Register reg = next_gpr();
if (reg != noreg) {
__ ldr(reg, src);
} else {
__ ldrh(r0, src);
__ strh(r0, Address(to(), next_stack_offset(sizeof(jshort))));
}
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
@ -79,7 +109,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
__ ldr(reg, src);
} else {
__ ldrw(r0, src);
__ strw(r0, Address(to(), next_stack_offset()));
__ strw(r0, Address(to(), next_stack_offset(sizeof(jint))));
}
}
@ -91,7 +121,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
__ ldr(reg, src);
} else {
__ ldr(r0, src);
__ str(r0, Address(to(), next_stack_offset()));
__ str(r0, Address(to(), next_stack_offset(sizeof(jlong))));
}
}
@ -103,7 +133,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
__ ldrs(reg, src);
} else {
__ ldrw(r0, src);
__ strw(r0, Address(to(), next_stack_offset()));
__ strw(r0, Address(to(), next_stack_offset(sizeof(jfloat))));
}
}
@ -115,7 +145,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
__ ldrd(reg, src);
} else {
__ ldr(r0, src);
__ str(r0, Address(to(), next_stack_offset()));
__ str(r0, Address(to(), next_stack_offset(sizeof(jdouble))));
}
}
@ -139,7 +169,8 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
__ cbnz(temp(), L);
__ mov(r0, zr);
__ bind(L);
__ str(r0, Address(to(), next_stack_offset()));
static_assert(sizeof(jobject) == wordSize, "");
__ str(r0, Address(to(), next_stack_offset(sizeof(jobject))));
}
}
@ -164,7 +195,7 @@ class SlowSignatureHandler
: public NativeSignatureIterator {
private:
address _from;
intptr_t* _to;
char* _to;
intptr_t* _int_args;
intptr_t* _fp_args;
intptr_t* _fp_identifiers;
@ -199,21 +230,38 @@ class SlowSignatureHandler
return -1;
}
void pass_stack(intptr_t value) {
*_to++ = value;
template<typename T>
void pass_stack(T value) {
MACOS_ONLY(_to = align_up(_to, sizeof(value)));
*(T *)_to = value;
_to += NOT_MACOS(wordSize) MACOS_ONLY(sizeof(value));
}
virtual void pass_byte() {
jbyte value = *(jbyte*)single_slot_addr();
if (pass_gpr(value) < 0) {
pass_stack<>(value);
}
}
virtual void pass_short() {
jshort value = *(jshort*)single_slot_addr();
if (pass_gpr(value) < 0) {
pass_stack<>(value);
}
}
virtual void pass_int() {
jint value = *(jint*)single_slot_addr();
if (pass_gpr(value) < 0) {
pass_stack(value);
pass_stack<>(value);
}
}
virtual void pass_long() {
intptr_t value = *double_slot_addr();
if (pass_gpr(value) < 0) {
pass_stack(value);
pass_stack<>(value);
}
}
@ -221,14 +269,14 @@ class SlowSignatureHandler
intptr_t* addr = single_slot_addr();
intptr_t value = *addr == 0 ? NULL : (intptr_t)addr;
if (pass_gpr(value) < 0) {
pass_stack(value);
pass_stack<>(value);
}
}
virtual void pass_float() {
jint value = *(jint*)single_slot_addr();
if (pass_fpr(value) < 0) {
pass_stack(value);
pass_stack<>(value);
}
}
@ -238,7 +286,7 @@ class SlowSignatureHandler
if (0 <= arg) {
*_fp_identifiers |= (1ull << arg); // mark as double
} else {
pass_stack(value);
pass_stack<>(value);
}
}
@ -247,7 +295,7 @@ class SlowSignatureHandler
: NativeSignatureIterator(method)
{
_from = from;
_to = to;
_to = (char *)to;
_int_args = to - (method->is_static() ? 16 : 17);
_fp_args = to - 8;
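The two hunks above (next_stack_offset and the templated pass_stack) carry the core ABI difference: on macOS/AArch64, outgoing native stack arguments are packed at their natural alignment instead of being widened to 8-byte slots as on Linux. The following standalone sketch (not HotSpot code; the align_up helper and the argument sizes are illustrative) shows how the two conventions produce different offsets for the same signature:

#include <cstddef>
#include <cstdio>

// Round 'off' up to the next multiple of 'align' (a power of two).
static size_t align_up(size_t off, size_t align) {
  return (off + align - 1) & ~(align - 1);
}

// Advance a stack offset for one outgoing argument of size 'elem_size'.
// 'packed' models the macOS/AArch64 rule; '!packed' models the convention
// used elsewhere in HotSpot, where every argument takes a full 8-byte slot.
static size_t next_stack_offset(size_t& off, size_t elem_size, bool packed) {
  if (packed) off = align_up(off, elem_size);   // natural alignment only
  size_t ret = off;
  off += packed ? elem_size : 8;                // 8 == wordSize on AArch64
  return ret;
}

int main() {
  // Example: stack-passed arguments of sizes (int, int, double, short).
  const size_t sizes[] = { 4, 4, 8, 2 };
  size_t linux_off = 0, macos_off = 0;
  for (size_t s : sizes) {
    printf("arg size %zu: linux offset %zu, macos offset %zu\n",
           s, next_stack_offset(linux_off, s, false),
           next_stack_offset(macos_off, s, true));
  }
  // Linux offsets: 0, 8, 16, 24.  macOS offsets: 0, 4, 8, 16.
  return 0;
}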

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +39,8 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
unsigned int _num_reg_int_args;
int _stack_offset;
void pass_byte();
void pass_short();
void pass_int();
void pass_long();
void pass_float();
@ -46,7 +49,7 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
Register next_gpr();
FloatRegister next_fpr();
int next_stack_offset();
int next_stack_offset(unsigned elem_size);
public:
// Creation

View File

@ -28,6 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
// MacroAssembler extends Assembler by frequently used macros.

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -773,7 +774,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
static int c_calling_convention_priv(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed) {
@ -804,6 +805,11 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (int_args < Argument::n_int_register_parameters_c) {
regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
} else {
#ifdef __APPLE__
// Argument types smaller than a word are packed one after another on the stack.
// The code below cannot handle that layout, so bail out.
return -1;
#endif
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@ -826,6 +832,11 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
if (fp_args < Argument::n_float_register_parameters_c) {
regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
} else {
#ifdef __APPLE__
// Argument types smaller than a word are packed one after another on the stack.
// The code below cannot handle that layout, so bail out.
return -1;
#endif
regs[i].set1(VMRegImpl::stack2reg(stk_args));
stk_args += 2;
}
@ -852,6 +863,16 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
return stk_args;
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
VMRegPair *regs2,
int total_args_passed)
{
int result = c_calling_convention_priv(sig_bt, regs, regs2, total_args_passed);
guarantee(result >= 0, "Unsupported arguments configuration");
return result;
}
// On 64 bit we will store integer like items to the stack as
// 64 bits items (Aarch64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
@ -1357,7 +1378,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Now figure out where the args must be stored and how much stack space
// they require.
int out_arg_slots;
out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
out_arg_slots = c_calling_convention_priv(out_sig_bt, out_regs, NULL, total_c_args);
if (out_arg_slots < 0) {
return NULL;
}
// Compute framesize for the wrapper. We need to handlize all oops in
// incoming registers

View File

@ -76,6 +76,9 @@ public:
// The CPU implementer codes can be found in
// ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile
// https://developer.arm.com/docs/ddi0487/latest
// Arm can assign codes that are not published in the manual.
// Apple's code is defined in
// https://github.com/apple/darwin-xnu/blob/33eb983/osfmk/arm/cpuid.h#L62
enum Family {
CPU_AMPERE = 0xC0,
CPU_ARM = 'A',
@ -90,6 +93,7 @@ public:
CPU_QUALCOM = 'Q',
CPU_MARVELL = 'V',
CPU_INTEL = 'i',
CPU_APPLE = 'a',
};
enum Feature_Flag {
@ -132,6 +136,11 @@ public:
constexpr static bool supports_stack_watermark_barrier() { return true; }
static void get_compatible_board(char *buf, int buflen);
#ifdef __APPLE__
// Is the CPU running emulated? (For example, macOS Rosetta running x86_64 code on an M1 ARM (aarch64) CPU.)
static bool is_cpu_emulated();
#endif
};
#endif // CPU_AARCH64_VM_VERSION_AARCH64_HPP
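The header above only declares is_cpu_emulated(); its implementation is not part of this hunk. On macOS, whether the current process runs translated under Rosetta 2 can be queried through the sysctl.proc_translated sysctl. The sketch below shows that approach as a standalone program; it illustrates the mechanism and is not the HotSpot implementation:

#include <sys/sysctl.h>
#include <cstdio>

// Returns 1 if the current process runs translated (e.g. x86_64 code under
// Rosetta 2 on Apple silicon), 0 if it runs natively, and -1 if the kernel
// does not know the sysctl (older macOS releases).
static int process_is_translated() {
  int ret = 0;
  size_t size = sizeof(ret);
  if (sysctlbyname("sysctl.proc_translated", &ret, &size, NULL, 0) == -1) {
    return -1;
  }
  return ret;
}

int main() {
  printf("translated: %d\n", process_is_translated());
  return 0;
}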

View File

@ -205,6 +205,8 @@ static char cpu_arch[] = "i386";
static char cpu_arch[] = "amd64";
#elif defined(ARM)
static char cpu_arch[] = "arm";
#elif defined(AARCH64)
static char cpu_arch[] = "aarch64";
#elif defined(PPC32)
static char cpu_arch[] = "ppc";
#else
@ -2124,7 +2126,7 @@ int os::active_processor_count() {
return _processor_count;
}
#ifdef __APPLE__
#if defined(__APPLE__) && defined(__x86_64__)
uint os::processor_id() {
// Get the initial APIC id and return the associated processor id. The initial APIC
// id is limited to 8-bits, which means we can have at most 256 unique APIC ids. If

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1296,20 +1296,17 @@ void install_signal_handlers() {
set_signal_handler(SIGXFSZ);
#if defined(__APPLE__)
// In Mac OS X 10.4, CrashReporter will write a crash log for all 'fatal' signals, including
// signals caught and handled by the JVM. To work around this, we reset the mach task
// signal handler that's placed on our process by CrashReporter. This disables
// CrashReporter-based reporting.
//
// This work-around is not necessary for 10.5+, as CrashReporter no longer intercedes
// on caught fatal signals.
//
// Additionally, gdb installs both standard BSD signal handlers, and mach exception
// handlers. By replacing the existing task exception handler, we disable gdb's mach
// lldb (gdb) installs both standard BSD signal handlers, and mach exception
// handlers. By replacing the existing task exception handler, we disable lldb's mach
// exception handling, while leaving the standard BSD signal handlers functional.
//
// EXC_MASK_BAD_ACCESS needed by all architectures for NULL ptr checking
// EXC_MASK_ARITHMETIC needed by all architectures for div by 0 checking
// EXC_MASK_BAD_INSTRUCTION needed by aarch64 to initiate deoptimization
kern_return_t kr;
kr = task_set_exception_ports(mach_task_self(),
EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC
AARCH64_ONLY(| EXC_MASK_BAD_INSTRUCTION),
MACH_PORT_NULL,
EXCEPTION_STATE_IDENTITY,
MACHINE_THREAD_STATE);
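For context, the changed call above is the whole mechanism: replacing the task-level exception ports routes the listed exception classes past any pre-installed Mach handler (such as a debugger's), so the process's BSD signal handlers see them, with EXC_MASK_BAD_INSTRUCTION added on aarch64 because, as the comment notes, it is needed to initiate deoptimization. A standalone sketch of the same call, illustrative only and outside the HotSpot signal setup:

#include <mach/mach.h>
#include <mach/task.h>
#include <mach/exception_types.h>
#include <mach/thread_status.h>
#include <cstdio>

int main() {
  exception_mask_t mask = EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC
#if defined(__aarch64__)
                          | EXC_MASK_BAD_INSTRUCTION
#endif
                          ;
  // MACH_PORT_NULL as the new port means "no Mach handler": the exceptions
  // fall through to the ordinary BSD signal handlers instead.
  kern_return_t kr = task_set_exception_ports(mach_task_self(),
                                              mask,
                                              MACH_PORT_NULL,
                                              EXCEPTION_STATE_IDENTITY,
                                              MACHINE_THREAD_STATE);
  printf("task_set_exception_ports returned %d\n", (int)kr);
  return kr == KERN_SUCCESS ? 0 : 1;
}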

View File

@ -0,0 +1,104 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
template<size_t byte_size>
struct Atomic::PlatformAdd {
template<typename D, typename I>
D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
FULL_MEM_BARRIER;
return res;
}
template<typename D, typename I>
D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
return add_and_fetch(dest, add_value, order) - add_value;
}
};
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
FULL_MEM_BARRIER;
return res;
}
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T compare_value,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
if (order == memory_order_relaxed) {
T value = compare_value;
__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
return value;
} else {
T value = compare_value;
FULL_MEM_BARRIER;
__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
FULL_MEM_BARRIER;
return value;
}
}
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
template <typename T>
void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
template <typename T>
void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};
#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
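The note at the top of this new file is the key constraint: HotSpot's memory_order_conservative needs a full barrier after atomic stores, so the conservative operators issue the GCC/Clang __atomic builtin with release ordering and then a full two-way fence (only the relaxed cmpxchg path skips the fences). A minimal standalone sketch of that pattern, without the HotSpot template machinery:

#include <cstdio>

// FULL_MEM_BARRIER in this port expands to __sync_synchronize().
#define FULL_MEM_BARRIER __sync_synchronize()

// Conservative fetch-and-add: do the read-modify-write with release
// semantics, then issue a full barrier, mirroring Atomic::PlatformAdd above.
static long conservative_fetch_and_add(volatile long* dest, long add_value) {
  long res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res - add_value;   // fetch_and_add reports the old value
}

int main() {
  volatile long counter = 41;
  long old = conservative_fetch_and_add(&counter, 1);
  printf("old=%ld new=%ld\n", old, (long)counter);   // old=41 new=42
  return 0;
}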

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
#define OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
#ifdef __APPLE__
#include <libkern/OSByteOrder.h>
#endif
#if defined(__APPLE__)
# define bswap_16(x) OSSwapInt16(x)
# define bswap_32(x) OSSwapInt32(x)
# define bswap_64(x) OSSwapInt64(x)
#else
# error "Unimplemented"
#endif
// Efficient swapping of data bytes from Java byte
// ordering to native byte ordering and vice versa.
inline u2 Bytes::swap_u2(u2 x) {
return bswap_16(x);
}
inline u4 Bytes::swap_u4(u4 x) {
return bswap_32(x);
}
inline u8 Bytes::swap_u8(u8 x) {
return bswap_64(x);
}
#endif // OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP
#define OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP
#define COPY_SMALL(from, to, count) \
{ \
long tmp0, tmp1, tmp2, tmp3; \
long tmp4, tmp5, tmp6, tmp7; \
__asm volatile( \
" adr %[t0], 0f;\n" \
" add %[t0], %[t0], %[cnt], lsl #5;\n" \
" br %[t0];\n" \
" .align 5;\n" \
"0:" \
" b 1f;\n" \
" .align 5;\n" \
" ldr %[t0], [%[s], #0];\n" \
" str %[t0], [%[d], #0];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" ldr %[t2], [%[s], #16];\n" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" str %[t2], [%[d], #16];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" ldp %[t2], %[t3], [%[s], #16];\n" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" stp %[t2], %[t3], [%[d], #16];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" ldp %[t2], %[t3], [%[s], #16];\n" \
" ldr %[t4], [%[s], #32];\n" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" stp %[t2], %[t3], [%[d], #16];\n" \
" str %[t4], [%[d], #32];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" ldp %[t2], %[t3], [%[s], #16];\n" \
" ldp %[t4], %[t5], [%[s], #32];\n" \
"2:" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" stp %[t2], %[t3], [%[d], #16];\n" \
" stp %[t4], %[t5], [%[d], #32];\n" \
" b 1f;\n" \
" .align 5;\n" \
" ldr %[t6], [%[s], #0];\n" \
" ldp %[t0], %[t1], [%[s], #8];\n" \
" ldp %[t2], %[t3], [%[s], #24];\n" \
" ldp %[t4], %[t5], [%[s], #40];\n" \
" str %[t6], [%[d]], #8;\n" \
" b 2b;\n" \
" .align 5;\n" \
" ldp %[t0], %[t1], [%[s], #0];\n" \
" ldp %[t2], %[t3], [%[s], #16];\n" \
" ldp %[t4], %[t5], [%[s], #32];\n" \
" ldp %[t6], %[t7], [%[s], #48];\n" \
" stp %[t0], %[t1], [%[d], #0];\n" \
" stp %[t2], %[t3], [%[d], #16];\n" \
" stp %[t4], %[t5], [%[d], #32];\n" \
" stp %[t6], %[t7], [%[d], #48];\n" \
"1:" \
\
: [s]"+r"(from), [d]"+r"(to), [cnt]"+r"(count), \
[t0]"=&r"(tmp0), [t1]"=&r"(tmp1), [t2]"=&r"(tmp2), [t3]"=&r"(tmp3), \
[t4]"=&r"(tmp4), [t5]"=&r"(tmp5), [t6]"=&r"(tmp6), [t7]"=&r"(tmp7) \
: \
: "memory", "cc"); \
}
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
return;
}
_Copy_conjoint_words(from, to, count);
}
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
if (__builtin_constant_p(count)) {
memcpy(to, from, count * sizeof(HeapWord));
return;
}
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
return;
}
_Copy_disjoint_words(from, to, count);
}
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
__asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory");
if (__builtin_expect(count <= 8, 1)) {
COPY_SMALL(from, to, count);
return;
}
_Copy_disjoint_words(from, to, count);
}
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_conjoint_words(from, to, count);
}
static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
pd_disjoint_words(from, to, count);
}
static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
(void)memmove(to, from, count);
}
static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
pd_conjoint_bytes(from, to, count);
}
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
_Copy_conjoint_jshorts_atomic(from, to, count);
}
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
_Copy_conjoint_jints_atomic(from, to, count);
}
static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
_Copy_conjoint_jlongs_atomic(from, to, count);
}
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
}
static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_bytes(from, to, count);
}
static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jshorts(from, to, count);
}
static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jints(from, to, count);
}
static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
_Copy_arrayof_conjoint_jlongs(from, to, count);
}
static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
assert(!UseCompressedOops, "foo!");
assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
_Copy_arrayof_conjoint_jlongs(from, to, count);
}
#endif // OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP
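COPY_SMALL above handles copies of up to eight words by computing a branch target (adr plus count << 5) into a table of 32-byte-aligned code stanzas, one stanza per word count, so there is no loop and no compare chain. Functionally it is a switch on the word count; the sketch below shows that plain-C++ equivalent (it deliberately omits the ldp/stp pairing and prefetch the inline assembly exists to provide, and unlike the assembly, which loads every word into registers before storing, it assumes non-overlapping ranges):

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uintptr_t word_t;   // stand-in for HotSpot's HeapWord

// Plain-C++ equivalent of COPY_SMALL: copy 'count' (0..8) words from a
// non-overlapping source. The real macro branches into fixed-size code
// stanzas instead of using a switch.
static void copy_small(const word_t* from, word_t* to, size_t count) {
  switch (count) {
  case 8: to[7] = from[7]; // fall through
  case 7: to[6] = from[6]; // fall through
  case 6: to[5] = from[5]; // fall through
  case 5: to[4] = from[4]; // fall through
  case 4: to[3] = from[3]; // fall through
  case 3: to[2] = from[2]; // fall through
  case 2: to[1] = from[1]; // fall through
  case 1: to[0] = from[0]; // fall through
  case 0: break;
  }
}

int main() {
  word_t src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  word_t dst[8] = {0};
  copy_small(src, dst, 5);
  for (int i = 0; i < 8; i++) printf("%lu ", (unsigned long)dst[i]);
  printf("\n");   // 1 2 3 4 5 0 0 0
  return 0;
}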

View File

@ -0,0 +1,240 @@
/*
* Copyright (c) 2016, Linaro Ltd. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#define CFUNC(x) _##x
.global CFUNC(_Copy_conjoint_words)
.global CFUNC(_Copy_disjoint_words)
s .req x0
d .req x1
count .req x2
t0 .req x3
t1 .req x4
t2 .req x5
t3 .req x6
t4 .req x7
t5 .req x8
t6 .req x9
t7 .req x10
.align 6
CFUNC(_Copy_disjoint_words):
// Ensure 2 word aligned
tbz s, #3, fwd_copy_aligned
ldr t0, [s], #8
str t0, [d], #8
sub count, count, #1
fwd_copy_aligned:
// Bias s & d so we only pre index on the last copy
sub s, s, #16
sub d, d, #16
ldp t0, t1, [s, #16]
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
ldp t6, t7, [s, #64]!
subs count, count, #16
blo fwd_copy_drain
fwd_copy_again:
prfm pldl1keep, [s, #256]
stp t0, t1, [d, #16]
ldp t0, t1, [s, #16]
stp t2, t3, [d, #32]
ldp t2, t3, [s, #32]
stp t4, t5, [d, #48]
ldp t4, t5, [s, #48]
stp t6, t7, [d, #64]!
ldp t6, t7, [s, #64]!
subs count, count, #8
bhs fwd_copy_again
fwd_copy_drain:
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
stp t6, t7, [d, #64]!
// count is now -8..-1 for 0..7 words to copy
adr t0, 0f
add t0, t0, count, lsl #5
br t0
.align 5
ret // -8 == 0 words
.align 5
ldr t0, [s, #16] // -7 == 1 word
str t0, [d, #16]
ret
.align 5
ldp t0, t1, [s, #16] // -6 = 2 words
stp t0, t1, [d, #16]
ret
.align 5
ldp t0, t1, [s, #16] // -5 = 3 words
ldr t2, [s, #32]
stp t0, t1, [d, #16]
str t2, [d, #32]
ret
.align 5
ldp t0, t1, [s, #16] // -4 = 4 words
ldp t2, t3, [s, #32]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
ret
.align 5
ldp t0, t1, [s, #16] // -3 = 5 words
ldp t2, t3, [s, #32]
ldr t4, [s, #48]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
str t4, [d, #48]
ret
.align 5
ldp t0, t1, [s, #16] // -2 = 6 words
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
ret
.align 5
ldp t0, t1, [s, #16] // -1 = 7 words
ldp t2, t3, [s, #32]
ldp t4, t5, [s, #48]
ldr t6, [s, #64]
stp t0, t1, [d, #16]
stp t2, t3, [d, #32]
stp t4, t5, [d, #48]
str t6, [d, #64]
// It is always aligned here; the code for 7 words is one instruction
// too large, so it just falls through.
.align 5
0:
ret
.align 6
CFUNC(_Copy_conjoint_words):
sub t0, d, s
cmp t0, count, lsl #3
bhs CFUNC(_Copy_disjoint_words)
add s, s, count, lsl #3
add d, d, count, lsl #3
// Ensure 2 word aligned
tbz s, #3, bwd_copy_aligned
ldr t0, [s, #-8]!
str t0, [d, #-8]!
sub count, count, #1
bwd_copy_aligned:
ldp t0, t1, [s, #-16]
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
ldp t6, t7, [s, #-64]!
subs count, count, #16
blo bwd_copy_drain
bwd_copy_again:
prfum pldl1keep, [s, #-256]
stp t0, t1, [d, #-16]
ldp t0, t1, [s, #-16]
stp t2, t3, [d, #-32]
ldp t2, t3, [s, #-32]
stp t4, t5, [d, #-48]
ldp t4, t5, [s, #-48]
stp t6, t7, [d, #-64]!
ldp t6, t7, [s, #-64]!
subs count, count, #8
bhs bwd_copy_again
bwd_copy_drain:
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
stp t6, t7, [d, #-64]!
// count is now -8..-1 for 0..7 words to copy
adr t0, 0f
add t0, t0, count, lsl #5
br t0
.align 5
ret // -8 == 0 words
.align 5
ldr t0, [s, #-8] // -7 == 1 word
str t0, [d, #-8]
ret
.align 5
ldp t0, t1, [s, #-16] // -6 = 2 words
stp t0, t1, [d, #-16]
ret
.align 5
ldp t0, t1, [s, #-16] // -5 = 3 words
ldr t2, [s, #-24]
stp t0, t1, [d, #-16]
str t2, [d, #-24]
ret
.align 5
ldp t0, t1, [s, #-16] // -4 = 4 words
ldp t2, t3, [s, #-32]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
ret
.align 5
ldp t0, t1, [s, #-16] // -3 = 5 words
ldp t2, t3, [s, #-32]
ldr t4, [s, #-40]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
str t4, [d, #-40]
ret
.align 5
ldp t0, t1, [s, #-16] // -2 = 6 words
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
ret
.align 5
ldp t0, t1, [s, #-16] // -1 = 7 words
ldp t2, t3, [s, #-32]
ldp t4, t5, [s, #-48]
ldr t6, [s, #-56]
stp t0, t1, [d, #-16]
stp t2, t3, [d, #-32]
stp t4, t5, [d, #-48]
str t6, [d, #-56]
// It is always aligned here; the code for 7 words is one instruction
// too large, so it just falls through.
.align 5
0:
ret

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 2048);
define_pd_global(intx, CompilerThreadStackSize, 2048);
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
// Used on 64 bit platforms for UseCompressedOops base address
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
#endif // OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP
// Interface for updating the instruction cache. Whenever the VM
// modifies code, part of the processor instruction cache potentially
// has to be flushed.
class ICache : public AbstractICache {
public:
static void initialize();
static void invalidate_word(address addr) {
__clear_cache((char *)addr, (char *)(addr + 4));
}
static void invalidate_range(address start, int nbytes) {
__clear_cache((char *)start, (char *)(start + nbytes));
}
};
#endif // OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP
// Included in orderAccess.hpp header file.
// Implementation of class OrderAccess.
inline void OrderAccess::loadload() { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore() { acquire(); }
inline void OrderAccess::storeload() { fence(); }
#define FULL_MEM_BARRIER __sync_synchronize()
#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
inline void OrderAccess::acquire() {
READ_MEM_BARRIER;
}
inline void OrderAccess::release() {
WRITE_MEM_BARRIER;
}
inline void OrderAccess::fence() {
FULL_MEM_BARRIER;
}
inline void OrderAccess::cross_modify_fence_impl() {
asm volatile("isb" : : : "memory");
}
#endif // OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP

View File

@ -0,0 +1,614 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// no precompiled headers
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "signals_posix.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
#ifndef __OpenBSD__
# include <ucontext.h>
#endif
#if !defined(__APPLE__) && !defined(__NetBSD__)
# include <pthread_np.h>
#endif
#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "fp"
#ifdef __APPLE__
// see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes
#define DU3_PREFIX(s, m) __ ## s.__ ## m
#endif
#define context_x uc_mcontext->DU3_PREFIX(ss,x)
#define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
#define context_lr uc_mcontext->DU3_PREFIX(ss,lr)
#define context_sp uc_mcontext->DU3_PREFIX(ss,sp)
#define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr)
address os::current_stack_pointer() {
#if defined(__clang__) || defined(__llvm__)
void *sp;
__asm__("mov %0, " SPELL_REG_SP : "=r"(sp));
return (address) sp;
#else
register void *sp __asm__ (SPELL_REG_SP);
return (address) sp;
#endif
}
char* os::non_memory_address_word() {
// Must never look like an address returned by reserve_memory,
// even in its subfields (as defined by the CPU immediate fields,
// if the CPU splits constants across multiple instructions).
// the return value used in computation of Universe::non_oop_word(), which
// is loaded by cpu/aarch64 by MacroAssembler::movptr(Register, uintptr_t)
return (char*) 0xffffffffffff;
}
address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
return (address)uc->context_pc;
}
void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
uc->context_pc = (intptr_t)pc ;
}
intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) {
return (intptr_t*)uc->context_sp;
}
intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) {
return (intptr_t*)uc->context_fp;
}
address os::fetch_frame_from_context(const void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
address epc;
const ucontext_t* uc = (const ucontext_t*)ucVoid;
if (uc != NULL) {
epc = os::Posix::ucontext_get_pc(uc);
if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
} else {
epc = NULL;
if (ret_sp) *ret_sp = (intptr_t *)NULL;
if (ret_fp) *ret_fp = (intptr_t *)NULL;
}
return epc;
}
frame os::fetch_frame_from_context(const void* ucVoid) {
intptr_t* sp;
intptr_t* fp;
address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
return frame(sp, fp, epc);
}
frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
const ucontext_t* uc = (const ucontext_t*)ucVoid;
// In compiled code, the stack banging is performed before LR
// has been saved in the frame. LR is live, and SP and FP
// belong to the caller.
intptr_t* fp = os::Bsd::ucontext_get_fp(uc);
intptr_t* sp = os::Bsd::ucontext_get_sp(uc);
address pc = (address)(uc->context_lr
- NativeInstruction::instruction_size);
return frame(sp, fp, pc);
}
// JVM compiled with -fno-omit-frame-pointer, so RFP is saved on the stack.
frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->link(), fr->link(), fr->sender_pc());
}
NOINLINE frame os::current_frame() {
intptr_t *fp = *(intptr_t **)__builtin_frame_address(0);
frame myframe((intptr_t*)os::current_stack_pointer(),
(intptr_t*)fp,
CAST_FROM_FN_PTR(address, os::current_frame));
if (os::is_first_C_frame(&myframe)) {
// stack is not walkable
return frame();
} else {
return os::get_sender_for_C_frame(&myframe);
}
}
ATTRIBUTE_PRINTF(6, 7)
static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
const char* detail_fmt, ...) {
va_list va;
va_start(va, detail_fmt);
VMError::report_and_die(thread, context, filename, lineno, message, detail_fmt, va);
va_end(va);
}
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {
// Enable WXWrite: this function is called by the signal handler at an
// arbitrary point of execution.
ThreadWXEnable wx(WXWrite, thread);
// decide if this trap can be handled by a stub
address stub = NULL;
address pc = NULL;
//%note os_trap_1
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Posix::ucontext_get_pc(uc);
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
address addr = (address) info->si_addr;
// Make sure the high order byte is sign extended, as it may be masked away by the hardware.
if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) {
addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56));
}
// check if fault address is within thread stack
if (thread->is_in_full_stack(addr)) {
// stack overflow
if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
return true; // continue
}
}
}
// We test if stub is already set (by the stack overflow code
// above) so it is not overwritten by the code that follows. This
// check is not required on other platforms, because on other
// platforms we check for SIGSEGV only or SIGBUS only, where here
// we have to check for both SIGSEGV and SIGBUS.
if (thread->thread_state() == _thread_in_Java && stub == NULL) {
// Java thread running in Java code => find exception handler if any
// a fault inside compiled code, the interpreter, or a stub
// Handle signal from NativeJump::patch_verified_entry().
if ((sig == SIGILL)
&& nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
if (TraceTraps) {
tty->print_cr("trap: zombie_not_entrant");
}
stub = SharedRuntime::get_handle_wrong_method_stub();
} else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {
stub = SharedRuntime::get_poll_stub(pc);
#if defined(__APPLE__)
// 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions.
// 64-bit Darwin may also use a SIGBUS (seen with compressed oops).
// Catching SIGBUS here prevents the implicit SIGBUS NULL check below from
// being called, so only do so if the implicit NULL check is not necessary.
} else if (sig == SIGBUS && !MacroAssembler::uses_implicit_null_check(info->si_addr)) {
#else
} else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
#endif
// BugId 4454115: A read from a MappedByteBuffer can fault
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = pc + NativeCall::instruction_size;
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
} else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) {
// Pull a pointer to the error message out of the instruction
// stream.
const uint64_t *detail_msg_ptr
= (uint64_t*)(pc + NativeInstruction::instruction_size);
const char *detail_msg = (const char *)*detail_msg_ptr;
const char *msg = "stop";
if (TraceTraps) {
tty->print_cr("trap: %s: (SIGILL)", msg);
}
// End life with a fatal error, message and detail message and the context.
// Note: no need to do any post-processing here (e.g. signal chaining)
report_and_die(thread, uc, NULL, 0, msg, "%s", detail_msg);
ShouldNotReachHere();
} else if (sig == SIGFPE &&
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
stub =
SharedRuntime::
continuation_for_implicit_exception(thread,
pc,
SharedRuntime::
IMPLICIT_DIVIDE_BY_ZERO);
} else if ((sig == SIGSEGV || sig == SIGBUS) &&
MacroAssembler::uses_implicit_null_check(info->si_addr)) {
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = pc + NativeCall::instruction_size;
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
// and the heap gets shrunk before the field access.
if ((sig == SIGSEGV) || (sig == SIGBUS)) {
address addr = JNI_FastGetField::find_slowcase_pc(pc);
if (addr != (address)-1) {
stub = addr;
}
}
}
if (stub != NULL) {
// save all thread context in case we need to restore it
if (thread != NULL) thread->set_saved_exception_pc(pc);
os::Posix::ucontext_set_pc(uc, stub);
return true;
}
return false; // Mute compiler
}
void os::Bsd::init_thread_fpu_state(void) {
}
bool os::is_allocatable(size_t bytes) {
return true;
}
////////////////////////////////////////////////////////////////////////////////
// thread stack
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 72 * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K;
// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
// default stack size (compiler thread needs larger stack)
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
}
static void current_stack_region(address * bottom, size_t * size) {
#ifdef __APPLE__
pthread_t self = pthread_self();
void *stacktop = pthread_get_stackaddr_np(self);
*size = pthread_get_stacksize_np(self);
*bottom = (address) stacktop - *size;
#elif defined(__OpenBSD__)
stack_t ss;
int rslt = pthread_stackseg_np(pthread_self(), &ss);
if (rslt != 0)
fatal("pthread_stackseg_np failed with error = %d", rslt);
*bottom = (address)((char *)ss.ss_sp - ss.ss_size);
*size = ss.ss_size;
#else
pthread_attr_t attr;
int rslt = pthread_attr_init(&attr);
// JVM needs to know exact stack location, abort if it fails
if (rslt != 0)
fatal("pthread_attr_init failed with error = %d", rslt);
rslt = pthread_attr_get_np(pthread_self(), &attr);
if (rslt != 0)
fatal("pthread_attr_get_np failed with error = %d", rslt);
if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
pthread_attr_getstacksize(&attr, size) != 0) {
fatal("Can not locate current stack attributes!");
}
pthread_attr_destroy(&attr);
#endif
assert(os::current_stack_pointer() >= *bottom &&
os::current_stack_pointer() < *bottom + *size, "just checking");
}
address os::current_stack_base() {
address bottom;
size_t size;
current_stack_region(&bottom, &size);
return (bottom + size);
}
size_t os::current_stack_size() {
// stack size includes normal stack and HotSpot guard pages
address bottom;
size_t size;
current_stack_region(&bottom, &size);
return size;
}
/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
void os::print_context(outputStream *st, const void *context) {
if (context == NULL) return;
const ucontext_t *uc = (const ucontext_t*)context;
st->print_cr("Registers:");
st->print( " x0=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 0]);
st->print(" x1=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 1]);
st->print(" x2=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 2]);
st->print(" x3=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 3]);
st->cr();
st->print( " x4=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 4]);
st->print(" x5=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 5]);
st->print(" x6=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 6]);
st->print(" x7=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 7]);
st->cr();
st->print( " x8=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 8]);
st->print(" x9=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 9]);
st->print(" x10=" INTPTR_FORMAT, (intptr_t)uc->context_x[10]);
st->print(" x11=" INTPTR_FORMAT, (intptr_t)uc->context_x[11]);
st->cr();
st->print( "x12=" INTPTR_FORMAT, (intptr_t)uc->context_x[12]);
st->print(" x13=" INTPTR_FORMAT, (intptr_t)uc->context_x[13]);
st->print(" x14=" INTPTR_FORMAT, (intptr_t)uc->context_x[14]);
st->print(" x15=" INTPTR_FORMAT, (intptr_t)uc->context_x[15]);
st->cr();
st->print( "x16=" INTPTR_FORMAT, (intptr_t)uc->context_x[16]);
st->print(" x17=" INTPTR_FORMAT, (intptr_t)uc->context_x[17]);
st->print(" x18=" INTPTR_FORMAT, (intptr_t)uc->context_x[18]);
st->print(" x19=" INTPTR_FORMAT, (intptr_t)uc->context_x[19]);
st->cr();
st->print( "x20=" INTPTR_FORMAT, (intptr_t)uc->context_x[20]);
st->print(" x21=" INTPTR_FORMAT, (intptr_t)uc->context_x[21]);
st->print(" x22=" INTPTR_FORMAT, (intptr_t)uc->context_x[22]);
st->print(" x23=" INTPTR_FORMAT, (intptr_t)uc->context_x[23]);
st->cr();
st->print( "x24=" INTPTR_FORMAT, (intptr_t)uc->context_x[24]);
st->print(" x25=" INTPTR_FORMAT, (intptr_t)uc->context_x[25]);
st->print(" x26=" INTPTR_FORMAT, (intptr_t)uc->context_x[26]);
st->print(" x27=" INTPTR_FORMAT, (intptr_t)uc->context_x[27]);
st->cr();
st->print( "x28=" INTPTR_FORMAT, (intptr_t)uc->context_x[28]);
st->print(" fp=" INTPTR_FORMAT, (intptr_t)uc->context_fp);
st->print(" lr=" INTPTR_FORMAT, (intptr_t)uc->context_lr);
st->print(" sp=" INTPTR_FORMAT, (intptr_t)uc->context_sp);
st->cr();
st->print( "pc=" INTPTR_FORMAT, (intptr_t)uc->context_pc);
st->print(" cpsr=" INTPTR_FORMAT, (intptr_t)uc->context_cpsr);
st->cr();
intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc);
st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", (intptr_t)sp);
print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
st->cr();
// Note: it may be unsafe to inspect memory near pc. For example, pc may
// point to garbage if the entry point in an nmethod is corrupted. Leave
// this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
print_instructions(st, pc, 4/*native instruction size*/);
st->cr();
}
void os::print_register_info(outputStream *st, const void *context) {
if (context == NULL) return;
const ucontext_t *uc = (const ucontext_t*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
// this is only for the "general purpose" registers
st->print(" x0="); print_location(st, uc->context_x[ 0]);
st->print(" x1="); print_location(st, uc->context_x[ 1]);
st->print(" x2="); print_location(st, uc->context_x[ 2]);
st->print(" x3="); print_location(st, uc->context_x[ 3]);
st->print(" x4="); print_location(st, uc->context_x[ 4]);
st->print(" x5="); print_location(st, uc->context_x[ 5]);
st->print(" x6="); print_location(st, uc->context_x[ 6]);
st->print(" x7="); print_location(st, uc->context_x[ 7]);
st->print(" x8="); print_location(st, uc->context_x[ 8]);
st->print(" x9="); print_location(st, uc->context_x[ 9]);
st->print("x10="); print_location(st, uc->context_x[10]);
st->print("x11="); print_location(st, uc->context_x[11]);
st->print("x12="); print_location(st, uc->context_x[12]);
st->print("x13="); print_location(st, uc->context_x[13]);
st->print("x14="); print_location(st, uc->context_x[14]);
st->print("x15="); print_location(st, uc->context_x[15]);
st->print("x16="); print_location(st, uc->context_x[16]);
st->print("x17="); print_location(st, uc->context_x[17]);
st->print("x18="); print_location(st, uc->context_x[18]);
st->print("x19="); print_location(st, uc->context_x[19]);
st->print("x20="); print_location(st, uc->context_x[20]);
st->print("x21="); print_location(st, uc->context_x[21]);
st->print("x22="); print_location(st, uc->context_x[22]);
st->print("x23="); print_location(st, uc->context_x[23]);
st->print("x24="); print_location(st, uc->context_x[24]);
st->print("x25="); print_location(st, uc->context_x[25]);
st->print("x26="); print_location(st, uc->context_x[26]);
st->print("x27="); print_location(st, uc->context_x[27]);
st->print("x28="); print_location(st, uc->context_x[28]);
st->cr();
}
void os::setup_fpu() {
}
#ifndef PRODUCT
void os::verify_stack_alignment() {
assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif
int os::extra_bang_size_in_bytes() {
// AArch64 does not require the additional stack bang.
return 0;
}
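// Flip the current thread's access to MAP_JIT (code cache) pages: WXWrite
// makes them writable for code generation and patching, WXExec makes them
// executable again. Backed by pthread_jit_write_protect_np() on macOS.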
void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}
extern "C" {
int SpinPause() {
return 0;
}
void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
if (from > to) {
const jshort *end = from + count;
while (from < end)
*(to++) = *(from++);
}
else if (from < to) {
const jshort *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
*(to--) = *(from--);
}
}
void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
if (from > to) {
const jint *end = from + count;
while (from < end)
*(to++) = *(from++);
}
else if (from < to) {
const jint *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
*(to--) = *(from--);
}
}
void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end)
os::atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
os::atomic_copy64(from--, to--);
}
}
void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
HeapWord* to,
size_t count) {
memmove(to, from, count);
}
void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
HeapWord* to,
size_t count) {
memmove(to, from, count * 2);
}
void _Copy_arrayof_conjoint_jints(const HeapWord* from,
HeapWord* to,
size_t count) {
memmove(to, from, count * 4);
}
void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
HeapWord* to,
size_t count) {
memmove(to, from, count * 8);
}
};

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP
static void setup_fpu();
static bool is_allocatable(size_t bytes);
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
static void atomic_copy64(const volatile void *src, volatile void *dst) {
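// An aligned 64-bit load/store is single-copy atomic on AArch64, so a plain copy suffices.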
*(jlong *) dst = *(const jlong *) src;
}
#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP
#define OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP
#include "runtime/prefetch.hpp"
inline void Prefetch::read (void *loc, intx interval) {
if (interval >= 0)
asm("prfm PLDL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval));
}
inline void Prefetch::write(void *loc, intx interval) {
if (interval >= 0)
asm("prfm PSTL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval));
}
#endif // OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
frame JavaThread::pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
void* ucontext, bool isInJava) {
assert(Thread::current() == this, "caller must be current thread");
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
return pd_get_top_frame(fr_addr, ucontext, isInJava);
}
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
assert(this->is_Java_thread(), "must be JavaThread");
JavaThread* jt = (JavaThread *)this;
// If we have a last_Java_frame, then we should use it even if
// isInJava == true. It should be more reliable than ucontext info.
if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
*fr_addr = jt->pd_last_frame();
return true;
}
// At this point, we don't have a last_Java_frame, so
// we try to glean some information out of the ucontext
// if we were running Java code when SIGPROF came in.
if (isInJava) {
ucontext_t* uc = (ucontext_t*) ucontext;
intptr_t* ret_fp;
intptr_t* ret_sp;
address addr = os::fetch_frame_from_context(uc, &ret_sp, &ret_fp);
if (addr == NULL || ret_sp == NULL) {
// ucontext wasn't useful
return false;
}
frame ret_frame(ret_sp, ret_fp, addr);
if (!ret_frame.safe_for_sender(jt)) {
#if COMPILER2_OR_JVMCI
// C2 and JVMCI use fp as a general register; see if a NULL fp helps
frame ret_frame2(ret_sp, NULL, addr);
if (!ret_frame2.safe_for_sender(jt)) {
// nothing else to try if the frame isn't good
return false;
}
ret_frame = ret_frame2;
#else
// nothing else to try if the frame isn't good
return false;
#endif // COMPILER2_OR_JVMCI
}
*fr_addr = ret_frame;
return true;
}
// nothing else to try
return false;
}
void JavaThread::cache_global_variables() { }

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP
private:
void pd_initialize() {
_anchor.clear();
}
frame pd_last_frame();
public:
static ByteSize last_Java_fp_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);
bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
private:
bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
public:
static Thread *aarch64_get_thread_helper() {
return Thread::current();
}
#endif // OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP
// These are the OS and CPU-specific fields, types and integer
// constants required by the Serviceability Agent. This file is
// referenced by vmStructs.cpp.
#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
\
/******************************/ \
/* Threads (NOTE: incomplete) */ \
/******************************/ \
nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \
nonstatic_field(OSThread, _unique_thread_id, uint64_t)
#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
\
/**********************/ \
/* Thread IDs */ \
/**********************/ \
\
declare_unsigned_integer_type(OSThread::thread_id_t)
#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
#endif // OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP

View File

@ -0,0 +1,107 @@
/*
* Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
#include <sys/sysctl.h>
int VM_Version::get_current_sve_vector_length() {
ShouldNotCallThis();
return -1;
}
int VM_Version::set_and_get_current_sve_vector_length(int length) {
ShouldNotCallThis();
return -1;
}
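// Returns true if the given hw.optional.* sysctl exists and reports a non-zero value.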
static bool cpu_has(const char* optional) {
uint32_t val;
size_t len = sizeof(val);
if (sysctlbyname(optional, &val, &len, NULL, 0)) {
return false;
}
return val;
}
void VM_Version::get_os_cpu_info() {
size_t sysctllen;
// hw.optional.floatingpoint always returns 1, see
// https://github.com/apple/darwin-xnu/blob/master/bsd/kern/kern_mib.c#L416.
// In ID_AA64PFR0_EL1 the AdvSIMD field always equals the FP field.
assert(cpu_has("hw.optional.floatingpoint"), "should be");
assert(cpu_has("hw.optional.neon"), "should be");
_features = CPU_FP | CPU_ASIMD;
// Only a few features are available via sysctl; see line 614 of
// https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/kern/kern_mib.c.auto.html
if (cpu_has("hw.optional.armv8_crc32")) _features |= CPU_CRC32;
if (cpu_has("hw.optional.armv8_1_atomics")) _features |= CPU_LSE;
int cache_line_size;
int hw_conf_cache_line[] = { CTL_HW, HW_CACHELINE };
sysctllen = sizeof(cache_line_size);
if (sysctl(hw_conf_cache_line, 2, &cache_line_size, &sysctllen, NULL, 0)) {
cache_line_size = 16;
}
_icache_line_size = 16; // minimal line length CCSIDR_EL1 can hold
_dcache_line_size = cache_line_size;
uint64_t dczid_el0;
__asm__ (
"mrs %0, DCZID_EL0\n"
: "=r"(dczid_el0)
);
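// DCZID_EL0: bit 4 (DZP) set means DC ZVA is prohibited; otherwise bits 3:0
// (BS) give the zero-block size as log2(words), i.e. 4 << BS bytes.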
if (!(dczid_el0 & 0x10)) {
_zva_length = 4 << (dczid_el0 & 0xf);
}
int family;
sysctllen = sizeof(family);
if (sysctlbyname("hw.cpufamily", &family, &sysctllen, NULL, 0)) {
family = 0;
}
_model = family;
_cpu = CPU_APPLE;
}
void VM_Version::get_compatible_board(char *buf, int buflen) {
assert(buf != NULL, "invalid argument");
assert(buflen >= 1, "invalid argument");
*buf = '\0';
}
#ifdef __APPLE__
bool VM_Version::is_cpu_emulated() {
return false;
}
#endif

View File

@ -1266,6 +1266,10 @@ JRT_END
void Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id) {
NOT_PRODUCT(_patch_code_slowcase_cnt++);
// Enable WXWrite: the function is called by c1 stub as a runtime function
// (see another implementation above).
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
if (TracePatching) {
tty->print_cr("Deoptimizing because patch is needed");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "utilities/debug.hpp"
int BarrierSetNMethod::disarmed_value() const {
@ -48,6 +49,10 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
}
int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
// Enable WXWrite: the function is called directly from nmethod_entry_barrier
// stub.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));
address return_address = *return_address_ptr;
CodeBlob* cb = CodeCache::find_blob(return_address);
assert(cb != NULL, "invariant");

View File

@ -970,6 +970,9 @@ JRT_END
nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
// Enable WXWrite: the function is called directly by interpreter.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
// frequency_counter_overflow_inner can throw async exception.
nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");

View File

@ -243,6 +243,8 @@ class MaskFillerForNative: public NativeSignatureIterator {
}
public:
void pass_byte() { /* ignore */ }
void pass_short() { /* ignore */ }
void pass_int() { /* ignore */ }
void pass_long() { /* ignore */ }
void pass_float() { /* ignore */ }

View File

@ -126,10 +126,11 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) {
}
// Bring the JVMCI compiler thread into the VM state.
#define JVMCI_VM_ENTRY_MARK \
ThreadInVMfromNative __tiv(thread); \
HandleMarkCleaner __hm(thread); \
Thread* THREAD = thread; \
#define JVMCI_VM_ENTRY_MARK \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
HandleMarkCleaner __hm(thread); \
Thread* THREAD = thread; \
debug_only(VMNativeEntryWrapper __vew;)
// Native method block that transitions current thread to '_thread_in_vm'.

View File

@ -72,6 +72,7 @@
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
@ -1411,6 +1412,10 @@ address OptoRuntime::handle_exception_C(JavaThread* thread) {
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
// Enable WXWrite: the function called directly by compiled code.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
// The frame we rethrow the exception to might not have been processed by the GC yet.
// The stack watermark barrier takes care of detecting that and ensuring the frame
// has updated oops.

View File

@ -1,6 +1,7 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Red Hat, Inc.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3631,6 +3632,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
// Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_native);
MACOS_AARCH64_ONLY(thread->enable_wx(WXExec));
} else {
// If create_vm exits because of a pending exception, exit with that
// exception. In the future when we figure out how to reclaim memory,
@ -3782,6 +3784,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
thread->record_stack_base_and_size();
thread->register_thread_stack_with_NMT();
thread->initialize_thread_current();
MACOS_AARCH64_ONLY(thread->init_wx());
if (!os::create_attached_thread(thread)) {
thread->smr_delete();
@ -3855,6 +3858,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
// needed.
ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_native);
MACOS_AARCH64_ONLY(thread->enable_wx(WXExec));
// Perform any platform dependent FPU setup
os::setup_fpu();
@ -3922,6 +3926,10 @@ jint JNICALL jni_DetachCurrentThread(JavaVM *vm) {
thread->exit(false, JavaThread::jni_detach);
thread->smr_delete();
// Go to the execute mode, the initial state of the thread on creation.
// Use os interface as the thread is not a JavaThread anymore.
MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec));
HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
return JNI_OK;
}

View File

@ -104,6 +104,7 @@ extern "C" { \
if (env != xenv) { \
NativeReportJNIFatalError(thr, warn_wrong_jnienv); \
} \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thr)); \
VM_ENTRY_BASE(result_type, header, thr)

View File

@ -435,6 +435,8 @@ struct jvmtiInterface_1_ jvmti</xsl:text>
<xsl:if test="count(@impl)=0 or not(contains(@impl,'innative'))">
<xsl:text>JavaThread* current_thread = this_thread->as_Java_thread();</xsl:text>
<xsl:value-of select="$space"/>
<xsl:text>MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));</xsl:text>
<xsl:value-of select="$space"/>
<xsl:text>ThreadInVMfromNative __tiv(current_thread);</xsl:text>
<xsl:value-of select="$space"/>
<xsl:text>VM_ENTRY_BASE(jvmtiError, </xsl:text>

View File

@ -178,6 +178,7 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
// other than the current thread is required we need to transition
// from native so as to resolve the jthread.
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
ThreadInVMfromNative __tiv(current_thread);
VM_ENTRY_BASE(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread)
debug_only(VMNativeEntryWrapper __vew;)

View File

@ -31,6 +31,7 @@ ProgrammableInvoker::Generator::Generator(CodeBuffer* code, const ABIDescriptor*
_layout(layout) {}
void ProgrammableInvoker::invoke_native(Stub stub, address buff, JavaThread* thread) {
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, thread));
ThreadToNativeFromVM ttnfvm(thread);
stub(buff);
}

View File

@ -61,7 +61,10 @@ void ProgrammableUpcallHandler::attach_thread_and_do_upcall(jobject rec, address
thread = Thread::current();
}
upcall_helper(thread->as_Java_thread(), rec, buff);
{
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
upcall_helper(thread->as_Java_thread(), rec, buff);
}
if (should_detach) {
JavaVM_ *vm = (JavaVM *)(&main_vm);

View File

@ -407,6 +407,7 @@ UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcOb
{
GuardUnsafeAccess guard(thread);
if (StubRoutines::unsafe_arraycopy() != NULL) {
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, thread));
StubRoutines::UnsafeArrayCopy_stub()(src, dst, sz);
} else {
Copy::conjoint_memory_atomic(src, dst, sz);
@ -458,12 +459,14 @@ UNSAFE_LEAF (void, Unsafe_WriteBack0(JNIEnv *env, jobject unsafe, jlong line)) {
}
#endif
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current()));
assert(StubRoutines::data_cache_writeback() != NULL, "sanity");
(StubRoutines::DataCacheWriteback_stub())(addr_from_java(line));
} UNSAFE_END
static void doWriteBackSync0(bool is_pre)
{
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current()));
assert(StubRoutines::data_cache_writeback_sync() != NULL, "sanity");
(StubRoutines::DataCacheWritebackSync_stub())(is_pre);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,8 @@
// Entry macro to transition from JNI to VM state.
#define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header) \
ClearPendingJniExcCheck _clearCheck(env);
ClearPendingJniExcCheck _clearCheck(env); \
MACOS_AARCH64_ONLY(ThreadWXEnable _wx(WXWrite, thread));
#define WB_END JNI_END

View File

@ -75,6 +75,7 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
@ -2469,6 +2470,9 @@ Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int tr
}
Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) {
// Enable WXWrite: current function is called from methods compiled by C2 directly
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
if (TraceDeoptimization) {
tty->print("Uncommon trap ");
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +34,7 @@
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -289,6 +291,8 @@ class VMNativeEntryWrapper {
#define VM_LEAF_BASE(result_type, header) \
debug_only(NoHandleMark __hm;) \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, \
Thread::current())); \
os::verify_stack_alignment(); \
/* begin of body */
@ -311,6 +315,7 @@ class VMNativeEntryWrapper {
#define JRT_ENTRY(result_type, header) \
result_type header { \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromJava __tiv(thread); \
VM_ENTRY_BASE(result_type, header, thread) \
debug_only(VMEntryWrapper __vew;)
@ -337,6 +342,7 @@ class VMNativeEntryWrapper {
#define JRT_ENTRY_NO_ASYNC(result_type, header) \
result_type header { \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromJava __tiv(thread, false /* check asyncs */); \
VM_ENTRY_BASE(result_type, header, thread) \
debug_only(VMEntryWrapper __vew;)
@ -345,6 +351,7 @@ class VMNativeEntryWrapper {
// to get back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header) \
result_type header { \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
HandleMarkCleaner __hm(thread);
#define JRT_BLOCK \
@ -374,6 +381,7 @@ extern "C" { \
result_type JNICALL header { \
JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
debug_only(VMNativeEntryWrapper __vew;) \
VM_ENTRY_BASE(result_type, header, thread)
@ -398,6 +406,7 @@ extern "C" { \
extern "C" { \
result_type JNICALL header { \
JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
debug_only(VMNativeEntryWrapper __vew;) \
VM_ENTRY_BASE(result_type, header, thread)
@ -407,6 +416,7 @@ extern "C" { \
extern "C" { \
result_type JNICALL header { \
JavaThread* thread = JavaThread::current(); \
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
debug_only(VMNativeEntryWrapper __vew;) \
VM_ENTRY_BASE(result_type, header, thread)

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,7 +79,6 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
}
}
// Make sure to set the oop's after the thread transition - since we can block there. No one is GC'ing
// the JavaCallWrapper before the entry frame is on the stack.
_callee_method = callee_method();
@ -108,12 +108,16 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
if(clear_pending_exception) {
_thread->clear_pending_exception();
}
MACOS_AARCH64_ONLY(_thread->enable_wx(WXExec));
}
JavaCallWrapper::~JavaCallWrapper() {
assert(_thread == JavaThread::current(), "must still be the same thread");
MACOS_AARCH64_ONLY(_thread->enable_wx(WXWrite));
// restore previous handle block & Java frame linkage
JNIHandleBlock *_old_handles = _thread->active_handles();
_thread->set_active_handles(_handles);

View File

@ -77,6 +77,11 @@ enum ThreadPriority { // JLS 20.20.1-3
CriticalPriority = 11 // Critical thread priority
};
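// W^X mode of a thread on macOS/AArch64: MAP_JIT (code cache) pages are
// either writable (WXWrite) or executable (WXExec) for a given thread,
// never both at the same time.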
enum WXMode {
WXWrite,
WXExec
};
// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;
@ -932,6 +937,11 @@ class os: AllStatic {
bool _done;
};
#if defined(__APPLE__) && defined(AARCH64)
// Enables write or execute access to writeable and executable pages.
static void current_thread_enable_wx(WXMode mode);
#endif // __APPLE__ && AARCH64
#ifndef _WINDOWS
// Suspend/resume support
// Protocol:

View File

@ -26,16 +26,27 @@
#define SHARE_RUNTIME_SAFEFETCH_INLINE_HPP
#include "runtime/stubRoutines.hpp"
#include "runtime/threadWXSetters.inline.hpp"
// SafeFetch allows loading a value from a location that is not known
// to be valid. If the load causes a fault, the error value is returned.
inline int SafeFetch32(int* adr, int errValue) {
assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
#if defined(__APPLE__) && defined(AARCH64)
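// The SafeFetch stub lives in the code cache, so the thread must be in WXExec mode to call it.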
Thread* thread = Thread::current_or_null_safe();
assert(thread != NULL, "required for W^X management");
ThreadWXEnable wx(WXExec, thread);
#endif // __APPLE__ && AARCH64
return StubRoutines::SafeFetch32_stub()(adr, errValue);
}
inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
#if defined(__APPLE__) && defined(AARCH64)
Thread* thread = Thread::current_or_null_safe();
assert(thread != NULL, "required for W^X management");
ThreadWXEnable wx(WXExec, thread);
#endif // __APPLE__ && AARCH64
return StubRoutines::SafeFetchN_stub()(adr, errValue);
}

View File

@ -64,6 +64,7 @@
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
@ -757,6 +758,9 @@ void SafepointSynchronize::block(JavaThread *thread) {
void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
// Enable WXWrite: the function is called implicitly from java code.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
if (log_is_enabled(Info, safepoint, stats)) {
Atomic::inc(&_nof_threads_hit_polling_page);
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -381,10 +382,14 @@ class NativeSignatureIterator: public SignatureIterator {
void do_type(BasicType type) {
switch (type) {
case T_BYTE:
case T_SHORT:
case T_INT:
case T_BOOLEAN:
pass_byte(); _jni_offset++; _offset++;
break;
case T_CHAR:
case T_SHORT:
pass_short(); _jni_offset++; _offset++;
break;
case T_INT:
pass_int(); _jni_offset++; _offset++;
break;
case T_FLOAT:
@ -418,6 +423,8 @@ class NativeSignatureIterator: public SignatureIterator {
virtual void pass_long() = 0;
virtual void pass_object() = 0; // objects, arrays, inlines
virtual void pass_float() = 0;
virtual void pass_byte() { pass_int(); };
virtual void pass_short() { pass_int(); };
#ifdef _LP64
virtual void pass_double() = 0;
#else

View File

@ -286,6 +286,8 @@ void StubRoutines::initialize2() {
#ifdef ASSERT
MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec));
#define TEST_ARRAYCOPY(type) \
test_arraycopy_func( type##_arraycopy(), sizeof(type)); \
test_arraycopy_func( type##_disjoint_arraycopy(), sizeof(type)); \
@ -359,6 +361,8 @@ void StubRoutines::initialize2() {
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite));
#endif
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -106,6 +107,7 @@
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/threadStatisticalInfo.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
@ -323,6 +325,8 @@ Thread::Thread() {
// If the main thread creates other threads before the barrier set that is an error.
assert(Thread::current_or_null() == NULL, "creating thread before barrier set");
}
MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
}
void Thread::initialize_tlab() {
@ -386,6 +390,8 @@ void Thread::call_run() {
register_thread_stack_with_NMT();
MACOS_AARCH64_ONLY(this->init_wx());
JFR_ONLY(Jfr::on_thread_start(this);)
log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
@ -2157,6 +2163,9 @@ void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread
// Note only the native==>VM/Java barriers can call this function and when
// thread state is _thread_in_native_trans.
void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
// Enable WXWrite: called directly from interpreter native wrapper.
MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
check_safepoint_and_suspend_for_native_trans(thread);
// After returning from native, it could be that the stack frames are not
@ -3039,6 +3048,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Initialize the os module
os::init();
MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXWrite));
// Record VM creation timing statistics
TraceVmCreationTime create_vm_timer;
create_vm_timer.start();
@ -3142,6 +3153,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
main_thread->record_stack_base_and_size();
main_thread->register_thread_stack_with_NMT();
main_thread->set_active_handles(JNIHandleBlock::allocate_block());
MACOS_AARCH64_ONLY(main_thread->init_wx());
if (!main_thread->set_as_starting_thread()) {
vm_shutdown_during_initialization(

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -822,6 +823,15 @@ protected:
// Not for general synchronization use.
static void SpinAcquire(volatile int * Lock, const char * Name);
static void SpinRelease(volatile int * Lock);
#if defined(__APPLE__) && defined(AARCH64)
private:
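// Current W^X state of this thread; _wx_init (debug only) catches use before init_wx().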
DEBUG_ONLY(bool _wx_init);
WXMode _wx_state;
public:
void init_wx();
WXMode enable_wx(WXMode new_state);
#endif // __APPLE__ && AARCH64
};
// Inline implementation of Thread::current()

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,6 +95,27 @@ inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
Atomic::release_store_fence(&_threads_hazard_ptr, new_list);
}
#if defined(__APPLE__) && defined(AARCH64)
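// Establish the thread's initial W^X state (WXWrite) and remember it so that
// enable_wx() can skip redundant mode switches later.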
inline void Thread::init_wx() {
assert(this == Thread::current(), "should only be called for current thread");
assert(!_wx_init, "second init");
_wx_state = WXWrite;
os::current_thread_enable_wx(_wx_state);
DEBUG_ONLY(_wx_init = true);
}
inline WXMode Thread::enable_wx(WXMode new_state) {
assert(this == Thread::current(), "should only be called for current thread");
assert(_wx_init, "should be inited");
WXMode old = _wx_state;
if (_wx_state != new_state) {
_wx_state = new_state;
os::current_thread_enable_wx(new_state);
}
return old;
}
#endif // __APPLE__ && AARCH64
inline void JavaThread::set_ext_suspended() {
set_suspend_flag (_ext_suspended);
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP
#define SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP
#include "runtime/thread.inline.hpp"
#if defined(__APPLE__) && defined(AARCH64)
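// RAII helper: switches the given thread to new_mode for the lifetime of the
// object and restores the previous mode in the destructor. A NULL thread
// leaves the W^X state untouched.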
class ThreadWXEnable {
Thread* _thread;
WXMode _old_mode;
public:
ThreadWXEnable(WXMode new_mode, Thread* thread) :
_thread(thread),
_old_mode(_thread ? _thread->enable_wx(new_mode) : WXWrite)
{ }
~ThreadWXEnable() {
if (_thread) {
_thread->enable_wx(_old_mode);
}
}
};
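// A minimal usage sketch (function name hypothetical): scope the switch so
// the previous mode is restored on every exit path.
//
//   void example_runtime_entry(JavaThread* thread) {
//     MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread));
//     // ... write to the code cache ...
//   } // destructor restores the previous mode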
#endif // __APPLE__ && AARCH64
#endif // SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP

View File

@ -569,6 +569,8 @@
#define NOT_AARCH64(code) code
#endif
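// Expands its argument only on macOS/AArch64 builds; empty everywhere else.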
#define MACOS_AARCH64_ONLY(x) MACOS_ONLY(AARCH64_ONLY(x))
#ifdef VM_LITTLE_ENDIAN
#define LITTLE_ENDIAN_ONLY(code) code
#define BIG_ENDIAN_ONLY(code)

View File

@ -36,7 +36,7 @@ NativeCallStack::NativeCallStack(int toSkip) {
// to call os::get_native_stack. A tail call is used if _NMT_NOINLINE_ is not defined
// (which means this is not a slowdebug build), and we are on 64-bit (except Windows).
// This is not necessarily a rule, but what has been observed to date.
#if (defined(_NMT_NOINLINE_) || defined(_WINDOWS) || !defined(_LP64) || defined(PPC64))
#if (defined(_NMT_NOINLINE_) || defined(_WINDOWS) || !defined(_LP64) || defined(PPC64) || (defined(BSD) && defined (__aarch64__)))
// Not a tail call.
toSkip++;
#if (defined(_NMT_NOINLINE_) && defined(BSD) && defined(_LP64))

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -210,6 +210,8 @@ static InvocationFunctions *GetExportedJNIFunctions() {
preferredJVM = "client";
#elif defined(__x86_64__)
preferredJVM = "server";
#elif defined(__aarch64__)
preferredJVM = "server";
#else
#error "Unknown architecture - needs definition"
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,9 @@
extern "C" {
#endif /* __cplusplus */
#if defined(TARGET_OS_MAC)
// Condition was copied from
// Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/gssapi/gssapi.h
#if TARGET_OS_MAC && (defined(__ppc__) || defined(__ppc64__) || defined(__i386__) || defined(__x86_64__))
# pragma pack(push,2)
#endif
@ -695,7 +697,7 @@ GSS_DLLIMP OM_uint32 gss_canonicalize_name(
gss_name_t * /* output_name */
);
#if defined(TARGET_OS_MAC)
#if TARGET_OS_MAC && (defined(__ppc__) || defined(__ppc64__) || defined(__i386__) || defined(__x86_64__))
# pragma pack(pop)
#endif

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,14 +43,10 @@
#import <sys/ptrace.h>
#include "libproc_impl.h"
#define UNSUPPORTED_ARCH "Unsupported architecture!"
#if defined(x86_64) && !defined(amd64)
#define amd64 1
#endif
#if amd64
#if defined(amd64)
#include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h"
#elif defined(aarch64)
#include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h"
#else
#error UNSUPPORTED_ARCH
#endif
@ -162,20 +159,20 @@ static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
return (struct ps_prochandle*)(intptr_t)ptr;
}
#if defined(__i386__)
#define hsdb_thread_state_t x86_thread_state32_t
#define hsdb_float_state_t x86_float_state32_t
#define HSDB_THREAD_STATE x86_THREAD_STATE32
#define HSDB_FLOAT_STATE x86_FLOAT_STATE32
#define HSDB_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
#define HSDB_FLOAT_STATE_COUNT x86_FLOAT_STATE32_COUNT
#elif defined(__x86_64__)
#if defined(amd64)
#define hsdb_thread_state_t x86_thread_state64_t
#define hsdb_float_state_t x86_float_state64_t
#define HSDB_THREAD_STATE x86_THREAD_STATE64
#define HSDB_FLOAT_STATE x86_FLOAT_STATE64
#define HSDB_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
#define HSDB_FLOAT_STATE_COUNT x86_FLOAT_STATE64_COUNT
#elif defined(aarch64)
#define hsdb_thread_state_t arm_thread_state64_t
#define hsdb_float_state_t arm_neon_state64_t
#define HSDB_THREAD_STATE ARM_THREAD_STATE64
#define HSDB_FLOAT_STATE ARM_NEON_STATE64
#define HSDB_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT
#define HSDB_FLOAT_STATE_COUNT ARM_NEON_STATE64_COUNT
#else
#error UNSUPPORTED_ARCH
#endif
@ -494,11 +491,21 @@ bool fill_java_threads(JNIEnv* env, jobject this_obj, struct ps_prochandle* ph)
lwpid_t uid = cinfos[j];
uint64_t beg = cinfos[j + 1];
uint64_t end = cinfos[j + 2];
#if defined(amd64)
if ((regs.r_rsp < end && regs.r_rsp >= beg) ||
(regs.r_rbp < end && regs.r_rbp >= beg)) {
set_lwp_id(ph, i, uid);
break;
}
#elif defined(aarch64)
if ((regs.r_sp < end && regs.r_sp >= beg) ||
(regs.r_fp < end && regs.r_fp >= beg)) {
set_lwp_id(ph, i, uid);
break;
}
#else
#error UNSUPPORTED_ARCH
#endif
}
}
(*env)->ReleaseLongArrayElements(env, thrinfos, (jlong*)cinfos, 0);
@ -530,14 +537,22 @@ jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, lo
#undef NPRGREG
#undef REG_INDEX
#if amd64
#if defined(amd64)
#define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
#define REG_INDEX(reg) sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_##reg
#elif defined(aarch64)
#define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG
#define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg
#else
#error UNSUPPORTED_ARCH
#endif
array = (*env)->NewLongArray(env, NPRGREG);
CHECK_EXCEPTION_(0);
regs = (*env)->GetLongArrayElements(env, array, &isCopy);
#if defined(amd64)
regs[REG_INDEX(R15)] = gregs.r_r15;
regs[REG_INDEX(R14)] = gregs.r_r14;
regs[REG_INDEX(R13)] = gregs.r_r13;
@ -566,8 +581,47 @@ jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, lo
regs[REG_INDEX(TRAPNO)] = gregs.r_trapno;
regs[REG_INDEX(RFL)] = gregs.r_rflags;
#elif defined(aarch64)
regs[REG_INDEX(R0)] = gregs.r_r0;
regs[REG_INDEX(R1)] = gregs.r_r1;
regs[REG_INDEX(R2)] = gregs.r_r2;
regs[REG_INDEX(R3)] = gregs.r_r3;
regs[REG_INDEX(R4)] = gregs.r_r4;
regs[REG_INDEX(R5)] = gregs.r_r5;
regs[REG_INDEX(R6)] = gregs.r_r6;
regs[REG_INDEX(R7)] = gregs.r_r7;
regs[REG_INDEX(R8)] = gregs.r_r8;
regs[REG_INDEX(R9)] = gregs.r_r9;
regs[REG_INDEX(R10)] = gregs.r_r10;
regs[REG_INDEX(R11)] = gregs.r_r11;
regs[REG_INDEX(R12)] = gregs.r_r12;
regs[REG_INDEX(R13)] = gregs.r_r13;
regs[REG_INDEX(R14)] = gregs.r_r14;
regs[REG_INDEX(R15)] = gregs.r_r15;
regs[REG_INDEX(R16)] = gregs.r_r16;
regs[REG_INDEX(R17)] = gregs.r_r17;
regs[REG_INDEX(R18)] = gregs.r_r18;
regs[REG_INDEX(R19)] = gregs.r_r19;
regs[REG_INDEX(R20)] = gregs.r_r20;
regs[REG_INDEX(R21)] = gregs.r_r21;
regs[REG_INDEX(R22)] = gregs.r_r22;
regs[REG_INDEX(R23)] = gregs.r_r23;
regs[REG_INDEX(R24)] = gregs.r_r24;
regs[REG_INDEX(R25)] = gregs.r_r25;
regs[REG_INDEX(R26)] = gregs.r_r26;
regs[REG_INDEX(R27)] = gregs.r_r27;
regs[REG_INDEX(R28)] = gregs.r_r28;
regs[REG_INDEX(FP)] = gregs.r_fp;
regs[REG_INDEX(LR)] = gregs.r_lr;
regs[REG_INDEX(SP)] = gregs.r_sp;
regs[REG_INDEX(PC)] = gregs.r_pc;
#else
#error UNSUPPORTED_ARCH
#endif
(*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT);
#endif /* amd64 */
return array;
}
@ -662,10 +716,14 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0(
return NULL;
}
#if amd64
#undef NPRGREG
#if defined(amd64)
#define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
#undef REG_INDEX
#define REG_INDEX(reg) sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_##reg
#elif defined(aarch64)
#define NPRGREG sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_NPRGREG
#else
#error UNSUPPORTED_ARCH
#endif
// 64 bit
print_debug("Getting threads for a 64-bit process\n");
@ -673,6 +731,8 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0(
CHECK_EXCEPTION_(0);
primitiveArray = (*env)->GetLongArrayElements(env, registerArray, NULL);
#if defined(amd64)
primitiveArray[REG_INDEX(R15)] = state.__r15;
primitiveArray[REG_INDEX(R14)] = state.__r14;
primitiveArray[REG_INDEX(R13)] = state.__r13;
@ -701,14 +761,50 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0(
primitiveArray[REG_INDEX(DS)] = 0;
primitiveArray[REG_INDEX(FSBASE)] = 0;
primitiveArray[REG_INDEX(GSBASE)] = 0;
print_debug("set registers\n");
(*env)->ReleaseLongArrayElements(env, registerArray, primitiveArray, 0);
#elif defined(aarch64)
primitiveArray[REG_INDEX(R0)] = state.__x[0];
primitiveArray[REG_INDEX(R1)] = state.__x[1];
primitiveArray[REG_INDEX(R2)] = state.__x[2];
primitiveArray[REG_INDEX(R3)] = state.__x[3];
primitiveArray[REG_INDEX(R4)] = state.__x[4];
primitiveArray[REG_INDEX(R5)] = state.__x[5];
primitiveArray[REG_INDEX(R6)] = state.__x[6];
primitiveArray[REG_INDEX(R7)] = state.__x[7];
primitiveArray[REG_INDEX(R8)] = state.__x[8];
primitiveArray[REG_INDEX(R9)] = state.__x[9];
primitiveArray[REG_INDEX(R10)] = state.__x[10];
primitiveArray[REG_INDEX(R11)] = state.__x[11];
primitiveArray[REG_INDEX(R12)] = state.__x[12];
primitiveArray[REG_INDEX(R13)] = state.__x[13];
primitiveArray[REG_INDEX(R14)] = state.__x[14];
primitiveArray[REG_INDEX(R15)] = state.__x[15];
primitiveArray[REG_INDEX(R16)] = state.__x[16];
primitiveArray[REG_INDEX(R17)] = state.__x[17];
primitiveArray[REG_INDEX(R18)] = state.__x[18];
primitiveArray[REG_INDEX(R19)] = state.__x[19];
primitiveArray[REG_INDEX(R20)] = state.__x[20];
primitiveArray[REG_INDEX(R21)] = state.__x[21];
primitiveArray[REG_INDEX(R22)] = state.__x[22];
primitiveArray[REG_INDEX(R23)] = state.__x[23];
primitiveArray[REG_INDEX(R24)] = state.__x[24];
primitiveArray[REG_INDEX(R25)] = state.__x[25];
primitiveArray[REG_INDEX(R26)] = state.__x[26];
primitiveArray[REG_INDEX(R27)] = state.__x[27];
primitiveArray[REG_INDEX(R28)] = state.__x[28];
primitiveArray[REG_INDEX(FP)] = state.__fp;
primitiveArray[REG_INDEX(LR)] = state.__lr;
primitiveArray[REG_INDEX(SP)] = state.__sp;
primitiveArray[REG_INDEX(PC)] = state.__pc;
#else
#error UNSUPPORTED_ARCH
#endif /* amd64 */
#endif
print_debug("set registers\n");
(*env)->ReleaseLongArrayElements(env, registerArray, primitiveArray, 0);
return registerArray;
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +31,16 @@
#include "libproc.h"
#include "symtab.h"
#define UNSUPPORTED_ARCH "Unsupported architecture!"
#if defined(__x86_64__) && !defined(amd64)
#define amd64 1
#endif
#if defined(__arm64__) && !defined(aarch64)
#define aarch64 1
#endif
#ifdef __APPLE__
#include <inttypes.h> // for PRIx64, 32, ...
#include <pthread.h>
@ -42,6 +53,7 @@
#define register_t uint64_t
#endif
#if defined(amd64)
/*** registers copied from bsd/amd64 */
typedef struct reg {
register_t r_r15;
@ -72,6 +84,48 @@ typedef struct reg {
register_t r_ss; // not used
} reg;
#elif defined(aarch64)
/*** registers copied from bsd/arm64 */
typedef struct reg {
register_t r_r0;
register_t r_r1;
register_t r_r2;
register_t r_r3;
register_t r_r4;
register_t r_r5;
register_t r_r6;
register_t r_r7;
register_t r_r8;
register_t r_r9;
register_t r_r10;
register_t r_r11;
register_t r_r12;
register_t r_r13;
register_t r_r14;
register_t r_r15;
register_t r_r16;
register_t r_r17;
register_t r_r18;
register_t r_r19;
register_t r_r20;
register_t r_r21;
register_t r_r22;
register_t r_r23;
register_t r_r24;
register_t r_r25;
register_t r_r26;
register_t r_r27;
register_t r_r28;
register_t r_fp;
register_t r_lr;
register_t r_sp;
register_t r_pc;
} reg;
#else
#error UNSUPPORTED_ARCH
#endif
// convenient defs
typedef struct mach_header_64 mach_header_64;
typedef struct load_command load_command;

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,8 +34,14 @@
#include "ps_core_common.h"
#ifdef __APPLE__
#if defined(amd64)
#include "sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext.h"
#elif defined(aarch64)
#include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h"
#else
#error UNSUPPORTED_ARCH
#endif
#endif /* __APPLE__ */
// This file has the libproc implementation to read core files.
// For live processes, refer to ps_proc.c. Portions of this is adapted
@ -195,6 +202,8 @@ static ps_prochandle_ops core_ops = {
void print_thread(sa_thread_info *threadinfo) {
print_debug("thread added: %d\n", threadinfo->lwp_id);
print_debug("registers:\n");
#if defined(amd64)
print_debug(" r_r15: 0x%" PRIx64 "\n", threadinfo->regs.r_r15);
print_debug(" r_r14: 0x%" PRIx64 "\n", threadinfo->regs.r_r14);
print_debug(" r_r13: 0x%" PRIx64 "\n", threadinfo->regs.r_r13);
@ -216,6 +225,45 @@ void print_thread(sa_thread_info *threadinfo) {
print_debug(" r_cs: 0x%" PRIx64 "\n", threadinfo->regs.r_cs);
print_debug(" r_rsp: 0x%" PRIx64 "\n", threadinfo->regs.r_rsp);
print_debug(" r_rflags: 0x%" PRIx64 "\n", threadinfo->regs.r_rflags);
#elif defined(aarch64)
print_debug(" r_r0: 0x%" PRIx64 "\n", threadinfo->regs.r_r0);
print_debug(" r_r1: 0x%" PRIx64 "\n", threadinfo->regs.r_r1);
print_debug(" r_r2: 0x%" PRIx64 "\n", threadinfo->regs.r_r2);
print_debug(" r_r3: 0x%" PRIx64 "\n", threadinfo->regs.r_r3);
print_debug(" r_r4: 0x%" PRIx64 "\n", threadinfo->regs.r_r4);
print_debug(" r_r5: 0x%" PRIx64 "\n", threadinfo->regs.r_r5);
print_debug(" r_r6: 0x%" PRIx64 "\n", threadinfo->regs.r_r6);
print_debug(" r_r7: 0x%" PRIx64 "\n", threadinfo->regs.r_r7);
print_debug(" r_r8: 0x%" PRIx64 "\n", threadinfo->regs.r_r8);
print_debug(" r_r9: 0x%" PRIx64 "\n", threadinfo->regs.r_r9);
print_debug(" r_r10: 0x%" PRIx64 "\n", threadinfo->regs.r_r10);
print_debug(" r_r11: 0x%" PRIx64 "\n", threadinfo->regs.r_r11);
print_debug(" r_r12: 0x%" PRIx64 "\n", threadinfo->regs.r_r12);
print_debug(" r_r13: 0x%" PRIx64 "\n", threadinfo->regs.r_r13);
print_debug(" r_r14: 0x%" PRIx64 "\n", threadinfo->regs.r_r14);
print_debug(" r_r15: 0x%" PRIx64 "\n", threadinfo->regs.r_r15);
print_debug(" r_r16: 0x%" PRIx64 "\n", threadinfo->regs.r_r16);
print_debug(" r_r17: 0x%" PRIx64 "\n", threadinfo->regs.r_r17);
print_debug(" r_r18: 0x%" PRIx64 "\n", threadinfo->regs.r_r18);
print_debug(" r_r19: 0x%" PRIx64 "\n", threadinfo->regs.r_r19);
print_debug(" r_r20: 0x%" PRIx64 "\n", threadinfo->regs.r_r20);
print_debug(" r_r21: 0x%" PRIx64 "\n", threadinfo->regs.r_r21);
print_debug(" r_r22: 0x%" PRIx64 "\n", threadinfo->regs.r_r22);
print_debug(" r_r23: 0x%" PRIx64 "\n", threadinfo->regs.r_r23);
print_debug(" r_r24: 0x%" PRIx64 "\n", threadinfo->regs.r_r24);
print_debug(" r_r25: 0x%" PRIx64 "\n", threadinfo->regs.r_r25);
print_debug(" r_r26: 0x%" PRIx64 "\n", threadinfo->regs.r_r26);
print_debug(" r_r27: 0x%" PRIx64 "\n", threadinfo->regs.r_r27);
print_debug(" r_r28: 0x%" PRIx64 "\n", threadinfo->regs.r_r28);
print_debug(" r_fp: 0x%" PRIx64 "\n", threadinfo->regs.r_fp);
print_debug(" r_lr: 0x%" PRIx64 "\n", threadinfo->regs.r_lr);
print_debug(" r_sp: 0x%" PRIx64 "\n", threadinfo->regs.r_sp);
print_debug(" r_pc: 0x%" PRIx64 "\n", threadinfo->regs.r_pc);
#else
#error UNSUPPORTED_ARCH
#endif
}
// read all segments64 commands from core file
@ -269,6 +317,7 @@ static bool read_core_segments(struct ps_prochandle* ph) {
goto err;
}
size += sizeof(thread_fc);
#if defined(amd64)
if (fc.flavor == x86_THREAD_STATE) {
x86_thread_state_t thrstate;
if (read(fd, (void *)&thrstate, sizeof(x86_thread_state_t)) != sizeof(x86_thread_state_t)) {
@ -328,6 +377,90 @@ static bool read_core_segments(struct ps_prochandle* ph) {
}
size += sizeof(x86_exception_state_t);
}
#elif defined(aarch64)
if (fc.flavor == ARM_THREAD_STATE64) {
arm_thread_state64_t thrstate;
if (read(fd, (void *)&thrstate, sizeof(arm_thread_state64_t)) != sizeof(arm_thread_state64_t)) {
printf("Reading flavor, count failed.\n");
goto err;
}
size += sizeof(arm_thread_state64_t);
// create thread info list, update lwp_id later
sa_thread_info* newthr = add_thread_info(ph, (pthread_t) -1, (lwpid_t) num_threads++);
if (newthr == NULL) {
printf("create thread_info failed\n");
goto err;
}
// note __DARWIN_UNIX03 depends on other definitions
#if __DARWIN_UNIX03
#define get_register_v(regst, regname) \
regst.__##regname
#else
#define get_register_v(regst, regname) \
regst.regname
#endif // __DARWIN_UNIX03
newthr->regs.r_r0 = get_register_v(thrstate, x[0]);
newthr->regs.r_r1 = get_register_v(thrstate, x[1]);
newthr->regs.r_r2 = get_register_v(thrstate, x[2]);
newthr->regs.r_r3 = get_register_v(thrstate, x[3]);
newthr->regs.r_r4 = get_register_v(thrstate, x[4]);
newthr->regs.r_r5 = get_register_v(thrstate, x[5]);
newthr->regs.r_r6 = get_register_v(thrstate, x[6]);
newthr->regs.r_r7 = get_register_v(thrstate, x[7]);
newthr->regs.r_r8 = get_register_v(thrstate, x[8]);
newthr->regs.r_r9 = get_register_v(thrstate, x[9]);
newthr->regs.r_r10 = get_register_v(thrstate, x[10]);
newthr->regs.r_r11 = get_register_v(thrstate, x[11]);
newthr->regs.r_r12 = get_register_v(thrstate, x[12]);
newthr->regs.r_r13 = get_register_v(thrstate, x[13]);
newthr->regs.r_r14 = get_register_v(thrstate, x[14]);
newthr->regs.r_r15 = get_register_v(thrstate, x[15]);
newthr->regs.r_r16 = get_register_v(thrstate, x[16]);
newthr->regs.r_r17 = get_register_v(thrstate, x[17]);
newthr->regs.r_r18 = get_register_v(thrstate, x[18]);
newthr->regs.r_r19 = get_register_v(thrstate, x[19]);
newthr->regs.r_r20 = get_register_v(thrstate, x[20]);
newthr->regs.r_r21 = get_register_v(thrstate, x[21]);
newthr->regs.r_r22 = get_register_v(thrstate, x[22]);
newthr->regs.r_r23 = get_register_v(thrstate, x[23]);
newthr->regs.r_r24 = get_register_v(thrstate, x[24]);
newthr->regs.r_r25 = get_register_v(thrstate, x[25]);
newthr->regs.r_r26 = get_register_v(thrstate, x[26]);
newthr->regs.r_r27 = get_register_v(thrstate, x[27]);
newthr->regs.r_r28 = get_register_v(thrstate, x[28]);
newthr->regs.r_fp = get_register_v(thrstate, fp);
newthr->regs.r_lr = get_register_v(thrstate, lr);
newthr->regs.r_sp = get_register_v(thrstate, sp);
newthr->regs.r_pc = get_register_v(thrstate, pc);
print_thread(newthr);
} else if (fc.flavor == ARM_NEON_STATE64) {
arm_neon_state64_t flstate;
if (read(fd, (void *)&flstate, sizeof(arm_neon_state64_t)) != sizeof(arm_neon_state64_t)) {
printf("Reading flavor, count failed.\n");
goto err;
}
size += sizeof(arm_neon_state64_t);
} else if (fc.flavor == ARM_EXCEPTION_STATE64) {
arm_exception_state64_t excpstate;
if (read(fd, (void *)&excpstate, sizeof(arm_exception_state64_t)) != sizeof(arm_exception_state64_t)) {
printf("Reading flavor, count failed.\n");
goto err;
}
size += sizeof(arm_exception_state64_t);
} else if (fc.flavor == ARM_DEBUG_STATE64) {
arm_debug_state64_t dbgstate;
if (read(fd, (void *)&dbgstate, sizeof(arm_debug_state64_t)) != sizeof(arm_debug_state64_t)) {
printf("Reading flavor, count failed.\n");
goto err;
}
size += sizeof(arm_debug_state64_t);
}
#else
#error UNSUPPORTED_ARCH
#endif
}
}
}
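The get_register_v helper above exists because, with __DARWIN_UNIX03 in effect (the default on current macOS SDKs), Apple prefixes the arm_thread_state64_t field names with a double underscore. A standalone illustration of the same naming difference, assuming only the macOS thread-status headers rather than the SA sources:

#include <mach/thread_status.h>   /* arm_thread_state64_t on macOS/AArch64 */
#include <stdint.h>

/* Read the program counter out of a captured thread state. */
static uint64_t read_pc(const arm_thread_state64_t *ts) {
#if __DARWIN_UNIX03
  return ts->__pc;                /* UNIX03 names: __x[29], __fp, __lr, __sp, __pc */
#else
  return ts->pc;
#endif
}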

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -621,8 +622,10 @@ public class HotSpotAgent {
if (cpu.equals("amd64") || cpu.equals("x86_64")) {
machDesc = new MachineDescriptionAMD64();
} else if (cpu.equals("aarch64")) {
machDesc = new MachineDescriptionAArch64();
} else {
throw new DebuggerException("Darwin only supported on x86_64. Current arch: " + cpu);
throw new DebuggerException("Darwin only supported on x86_64/aarch64. Current arch: " + cpu);
}
BsdDebuggerLocal dbg = new BsdDebuggerLocal(machDesc, !isServer);

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,8 +31,10 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.x86.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.bsd.x86.*;
import sun.jvm.hotspot.debugger.bsd.amd64.*;
import sun.jvm.hotspot.debugger.bsd.aarch64.*;
import sun.jvm.hotspot.utilities.*;
class BsdCDebugger implements CDebugger {
@ -97,6 +100,13 @@ class BsdCDebugger implements CDebugger {
Address pc = context.getRegisterAsAddress(AMD64ThreadContext.RIP);
if (pc == null) return null;
return new BsdAMD64CFrame(dbg, rbp, pc);
} else if (cpu.equals("aarch64")) {
AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP);
if (fp == null) return null;
Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC);
if (pc == null) return null;
return new BsdAARCH64CFrame(dbg, fp, pc);
} else {
throw new DebuggerException(cpu + " is not yet supported");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
package sun.jvm.hotspot.debugger.bsd;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.bsd.aarch64.*;
import sun.jvm.hotspot.debugger.bsd.amd64.*;
import sun.jvm.hotspot.debugger.bsd.x86.*;
@ -35,6 +36,8 @@ class BsdThreadContextFactory {
return new BsdX86ThreadContext(dbg);
} else if (cpu.equals("amd64") || cpu.equals("x86_64")) {
return new BsdAMD64ThreadContext(dbg);
} else if (cpu.equals("aarch64")) {
return new BsdAARCH64ThreadContext(dbg);
} else {
throw new RuntimeException("cpu " + cpu + " is not yet supported");
}

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Red Hat Inc.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.bsd.aarch64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.bsd.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.cdbg.basic.*;
final public class BsdAARCH64CFrame extends BasicCFrame {
public BsdAARCH64CFrame(BsdDebugger dbg, Address fp, Address pc) {
super(dbg.getCDebugger());
this.fp = fp;
this.pc = pc;
this.dbg = dbg;
}
// override base class impl to avoid ELF parsing
public ClosestSymbol closestSymbolToPC() {
// try native lookup in debugger.
return dbg.lookup(dbg.getAddressValue(pc()));
}
public Address pc() {
return pc;
}
public Address localVariableBase() {
return fp;
}
public CFrame sender(ThreadProxy thread) {
AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext();
Address rsp = context.getRegisterAsAddress(AARCH64ThreadContext.SP);
if ((fp == null) || fp.lessThan(rsp)) {
return null;
}
// Check alignment of fp
if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) {
return null;
}
Address nextFP = fp.getAddressAt(0 * ADDRESS_SIZE);
if (nextFP == null || nextFP.lessThanOrEqual(fp)) {
return null;
}
Address nextPC = fp.getAddressAt(1 * ADDRESS_SIZE);
if (nextPC == null) {
return null;
}
return new BsdAARCH64CFrame(dbg, nextFP, nextPC);
}
// package/class internals only
private static final int ADDRESS_SIZE = 8;
private Address pc;
private Address sp;
private Address fp;
private BsdDebugger dbg;
}
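The sender walk above relies on the standard AArch64 frame record: the caller's fp is stored at [fp] and the return address at [fp + 8], and the chain is abandoned if fp drops below sp, is not 16-byte aligned, or stops growing. A rough C rendering of the same walk over an in-process stack, purely as an illustration of the layout (not part of the SA):

#include <stdint.h>

typedef struct { uint64_t fp, pc; } frame;

/* Follow one AArch64 frame record: [fp] = caller fp, [fp + 8] = return pc. */
static int next_frame(frame *f, uint64_t sp) {
  if (f->fp == 0 || f->fp < sp || (f->fp & 0xf) != 0)
    return 0;                                   /* below sp or misaligned */
  const uint64_t *rec = (const uint64_t *)(uintptr_t)f->fp;
  uint64_t caller_fp = rec[0];
  uint64_t ret_pc    = rec[1];
  if (caller_fp <= f->fp || ret_pc == 0)
    return 0;                                   /* chain must keep growing upward */
  f->fp = caller_fp;
  f->pc = ret_pc;
  return 1;
}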

View File

@ -0,0 +1,47 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.debugger.bsd.aarch64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.bsd.*;
public class BsdAARCH64ThreadContext extends AARCH64ThreadContext {
private BsdDebugger debugger;
public BsdAARCH64ThreadContext(BsdDebugger debugger) {
super();
this.debugger = debugger;
}
public void setRegisterAsAddress(int index, Address value) {
setRegister(index, debugger.getAddressValue(value));
}
public Address getRegisterAsAddress(int index) {
return debugger.newAddress(getRegister(index));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@ import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.linux_ppc64.LinuxPPC64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.bsd_amd64.BsdAMD64JavaThreadPDAccess;
import sun.jvm.hotspot.runtime.bsd_aarch64.BsdAARCH64JavaThreadPDAccess;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
@ -132,6 +133,8 @@ public class Threads {
} else if (os.equals("darwin")) {
if (cpu.equals("amd64") || cpu.equals("x86_64")) {
access = new BsdAMD64JavaThreadPDAccess();
} else if (cpu.equals("aarch64")) {
access = new BsdAARCH64JavaThreadPDAccess();
}
}

View File

@ -0,0 +1,139 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.runtime.bsd_aarch64;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.aarch64.*;
import sun.jvm.hotspot.debugger.bsd.BsdDebugger;
import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.runtime.aarch64.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public class BsdAARCH64JavaThreadPDAccess implements JavaThreadPDAccess {
private static AddressField lastJavaFPField;
private static AddressField osThreadField;
// Fields from OSThread
private static CIntegerField osThreadThreadIDField;
private static CIntegerField osThreadUniqueThreadIDField;
// This is currently unneeded but is being kept in case we change
// the currentFrameGuess algorithm
private static final long GUESS_SCAN_RANGE = 128 * 1024;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("JavaThread");
osThreadField = type.getAddressField("_osthread");
Type anchorType = db.lookupType("JavaFrameAnchor");
lastJavaFPField = anchorType.getAddressField("_last_Java_fp");
Type osThreadType = db.lookupType("OSThread");
osThreadThreadIDField = osThreadType.getCIntegerField("_thread_id");
osThreadUniqueThreadIDField = osThreadType.getCIntegerField("_unique_thread_id");
}
public Address getLastJavaFP(Address addr) {
return lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset()));
}
public Address getLastJavaPC(Address addr) {
return null;
}
public Address getBaseOfStackPointer(Address addr) {
return null;
}
public Frame getLastFramePD(JavaThread thread, Address addr) {
Address fp = thread.getLastJavaFP();
if (fp == null) {
return null; // no information
}
return new AARCH64Frame(thread.getLastJavaSP(), fp);
}
public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) {
return new AARCH64RegisterMap(thread, updateMap);
}
public Frame getCurrentFrameGuess(JavaThread thread, Address addr) {
ThreadProxy t = getThreadProxy(addr);
AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
AARCH64CurrentFrameGuess guesser = new AARCH64CurrentFrameGuess(context, thread);
if (!guesser.run(GUESS_SCAN_RANGE)) {
return null;
}
if (guesser.getPC() == null) {
return new AARCH64Frame(guesser.getSP(), guesser.getFP());
} else {
return new AARCH64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC());
}
}
public void printThreadIDOn(Address addr, PrintStream tty) {
tty.print(getThreadProxy(addr));
}
public void printInfoOn(Address threadAddr, PrintStream tty) {
tty.print("Thread id: ");
printThreadIDOn(threadAddr, tty);
// tty.println("\nPostJavaState: " + getPostJavaState(threadAddr));
}
public Address getLastSP(Address addr) {
ThreadProxy t = getThreadProxy(addr);
AARCH64ThreadContext context = (AARCH64ThreadContext) t.getContext();
return context.getRegisterAsAddress(AARCH64ThreadContext.SP);
}
public ThreadProxy getThreadProxy(Address addr) {
// Addr is the address of the JavaThread.
// Fetch the OSThread (for now and for simplicity, not making a
// separate "OSThread" class in this package)
Address osThreadAddr = osThreadField.getValue(addr);
// Get the address of the _thread_id from the OSThread
Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset());
Address uniqueThreadIdAddr = osThreadAddr.addOffsetTo(osThreadUniqueThreadIDField.getOffset());
BsdDebuggerLocal debugger = (BsdDebuggerLocal) VM.getVM().getDebugger();
return debugger.getThreadForIdentifierAddress(threadIdAddr, uniqueThreadIdAddr);
}
}

View File

@ -1,5 +1,6 @@
#
# Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
@ -107,13 +108,19 @@ else
ifeq ($(OS),Darwin)
CPU = $(shell uname -m)
ARCH1=$(CPU:x86_64=amd64)
ARCH=$(ARCH1:i686=i386)
ARCH2=$(ARCH1:arm64=aarch64)
ARCH=$(ARCH2:i686=i386)
CONFIGURE_ARGS/aarch64= --enable-targets=aarch64-darwin
CONFIGURE_ARGS = $(CONFIGURE_ARGS/$(ARCH))
ifdef LP64
CFLAGS/amd64 += -m64
else
ARCH=$(ARCH1:amd64=i386)
ARCH=$(ARCH2:amd64=i386)
CFLAGS/i386 += -m32
endif # LP64
ifeq ($(CPU), arm64)
CFLAGS/aarch64 += -m64
endif # arm64
CFLAGS += $(CFLAGS/$(ARCH))
CFLAGS += -fPIC
OS = macosx
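The extra arm64=aarch64 substitution is needed because uname -m on Apple silicon reports arm64, while the rest of this makefile keys its configure arguments and CFLAGS off aarch64. A throwaway check of the raw value being normalized, shown only as an illustration:

#include <stdio.h>
#include <sys/utsname.h>

int main(void) {
  struct utsname u;
  if (uname(&u) != 0)
    return 1;
  /* Prints "arm64" on Apple silicon; the makefile maps this to "aarch64". */
  printf("%s\n", u.machine);
  return 0;
}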

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,8 @@
#include "jni.h"
#include "unittest.hpp"
#include "runtime/thread.inline.hpp"
// Default value for -new-thread option: true on AIX because we run into
// problems when attempting to initialize the JVM on the primordial thread.
#ifdef _AIX
@ -91,7 +93,14 @@ static int init_jvm(int argc, char **argv, bool disable_error_handling) {
JavaVM* jvm;
JNIEnv* env;
return JNI_CreateJavaVM(&jvm, (void**)&env, &args);
int ret = JNI_CreateJavaVM(&jvm, (void**)&env, &args);
if (ret == JNI_OK) {
// CreateJavaVM leaves the thread in WXExec context, while gtests
// call internal functions assuming it runs in WXWrite context.
// Switch to WXWrite once for all test cases.
MACOS_AARCH64_ONLY(Thread::current()->enable_wx(WXWrite));
}
return ret;
}
static bool is_same_vm_test(const char* name) {
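On macOS/AArch64 the JIT code area is mapped with MAP_JIT, and each thread has to toggle between a writable view and an executable view of it; HotSpot models that as the WXWrite/WXExec states used above, with Thread::current()->enable_wx() as the wrapper. A minimal standalone sketch of the underlying per-thread toggle, using Apple's pthread_jit_write_protect_np directly instead of the HotSpot wrapper:

#include <libkern/OSCacheControl.h>   /* sys_icache_invalidate */
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>

/* Copy code into a MAP_JIT page and run it, flipping the per-thread W^X state. */
static void write_then_exec(void *jit_page, const void *code, size_t len) {
  pthread_jit_write_protect_np(0);           /* writable; roughly HotSpot's WXWrite */
  memcpy(jit_page, code, len);
  pthread_jit_write_protect_np(1);           /* executable; roughly WXExec */
  sys_icache_invalidate(jit_page, len);      /* flush icache before running new code */
  ((void (*)(void))jit_page)();              /* assumes `code` is position-independent */
}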

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -125,9 +125,11 @@ public class CompressedClassPointers {
"-XX:+VerifyBeforeGC", "-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
if (testNarrowKlassBase()) {
output.shouldContain("Narrow klass base: 0x0000000000000000");
if (!Platform.isAArch64() && !Platform.isOSX()) {
output.shouldContain("Narrow klass shift: 0");
if (!(Platform.isAArch64() && Platform.isOSX())) { // see JDK-8262895
output.shouldContain("Narrow klass base: 0x0000000000000000");
if (!Platform.isAArch64() && !Platform.isOSX()) {
output.shouldContain("Narrow klass shift: 0");
}
}
}
output.shouldHaveExitValue(0);

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @requires os.arch == "aarch64" & os.family == "mac"
* @run main/othervm/native TestCodegenAttach
*/
public class TestCodegenAttach {
static native void testCodegenAttach();
static {
System.loadLibrary("codegenAttach");
}
public static void main(String[] args) throws Throwable {
testCodegenAttach();
}
}

View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include "jni.h"
#if defined(__APPLE__) && defined(__aarch64__)
#include <pthread.h>
#include <sys/mman.h>
JavaVM* jvm;
static void* codegen;
static int thread_start2(int val) {
JNIEnv *env;
jclass class_id;
jmethodID method_id;
int res;
printf("Native thread is running and attaching ...\n");
res = (*jvm)->AttachCurrentThread(jvm, (void **)&env, NULL);
if (res != JNI_OK) {
fprintf(stderr, "Test ERROR. Can't attach current thread: %d\n", res);
exit(1);
}
res = (*jvm)->DetachCurrentThread(jvm);
if (res != JNI_OK) {
fprintf(stderr, "Test ERROR. Can't detach current thread: %d\n", res);
exit(1);
}
printf("Native thread is about to finish\n");
return 1 + val;
}
static int trampoline(int(*fn)(int), int arg) {
int val = fn(arg);
// ensure some code in the MAP_JIT area still runs after the target function returns
return 1 + val;
}
static void * thread_start(void* unused) {
int val = ((int(*)(int(*)(int),int))codegen)(thread_start2, 10);
printf("return val = %d\n", val);
return NULL;
}
JNIEXPORT void JNICALL
Java_TestCodegenAttach_testCodegenAttach
(JNIEnv *env, jclass cls) {
codegen = mmap(NULL, 0x1000,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
if (codegen == MAP_FAILED) {
perror("mmap");
exit(1);
}
pthread_jit_write_protect_np(false);
memcpy(codegen, trampoline, 128);
pthread_jit_write_protect_np(true);
pthread_t thread;
int res = (*env)->GetJavaVM(env, &jvm);
if (res != JNI_OK) {
fprintf(stderr, "Test ERROR. Can't extract JavaVM: %d\n", res);
exit(1);
}
if ((res = pthread_create(&thread, NULL, thread_start, NULL)) != 0) {
fprintf(stderr, "TEST ERROR: pthread_create failed: %s (%d)\n", strerror(res), res);
exit(1);
}
if ((res = pthread_join(thread, NULL)) != 0) {
fprintf(stderr, "TEST ERROR: pthread_join failed: %s (%d)\n", strerror(res), res);
exit(1);
}
}
#else
JNIEXPORT void JNICALL
Java_TestCodegenAttach_testCodegenAttach
(JNIEnv *env, jclass cls) {
printf("should not reach here\n");
exit(1);
}
#endif // __APPLE__ && __aarch64__

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -531,6 +531,8 @@ class CheckedFeatures {
{"linux-s390x", "com.sun.jdi.SharedMemoryAttach"},
{"macosx-amd64", "com.sun.jdi.SharedMemoryAttach"},
{"mac-x64", "com.sun.jdi.SharedMemoryAttach"},
{"macosx-aarch64", "com.sun.jdi.SharedMemoryAttach"},
{"mac-aarch64", "com.sun.jdi.SharedMemoryAttach"},
{"aix-ppc64", "com.sun.jdi.SharedMemoryAttach"},
// listening connectors
@ -554,6 +556,8 @@ class CheckedFeatures {
{"linux-s390x", "com.sun.jdi.SharedMemoryListen"},
{"macosx-amd64", "com.sun.jdi.SharedMemoryListen"},
{"mac-x64", "com.sun.jdi.SharedMemoryListen"},
{"macosx-aarch64", "com.sun.jdi.SharedMemoryListen"},
{"mac-aarch64", "com.sun.jdi.SharedMemoryListen"},
{"aix-ppc64", "com.sun.jdi.SharedMemoryListen"},
// launching connectors
@ -611,8 +615,14 @@ class CheckedFeatures {
{"macosx-amd64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"macosx-amd64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
{"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
{"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
{"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
{"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
{"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"},
{"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"},
@ -629,6 +639,8 @@ class CheckedFeatures {
{"linux-s390x", "dt_shmem"},
{"macosx-amd64", "dt_shmem"},
{"mac-x64", "dt_shmem"},
{"macosx-aarch64", "dt_shmem"},
{"mac-aarch64", "dt_shmem"},
{"aix-ppc64", "dt_shmem"},
};
}