Compare commits


No commits in common. "70248c8a34aa47cc5e8d8309fea58550d491f398" and "98c3bff8ceaab165e26a1f7b84ddaf1cd2d7d5c0" have entirely different histories.

1008 changed files with 24133 additions and 30324 deletions

View File

@ -36,7 +36,7 @@ on:
platforms:
description: 'Platform(s) to execute on (comma separated, e.g. "linux-x64, macos, aarch64")'
required: true
default: 'linux-x64, linux-x64-variants, linux-cross-compile, alpine-linux-x64, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs'
default: 'linux-x64, linux-x86-hs, linux-x64-variants, linux-cross-compile, alpine-linux-x64, macos-x64, macos-aarch64, windows-x64, windows-aarch64, docs'
configure-arguments:
description: 'Additional configure arguments'
required: false
@ -62,6 +62,7 @@ jobs:
EXCLUDED_PLATFORMS: 'alpine-linux-x64'
outputs:
linux-x64: ${{ steps.include.outputs.linux-x64 }}
linux-x86-hs: ${{ steps.include.outputs.linux-x86-hs }}
linux-x64-variants: ${{ steps.include.outputs.linux-x64-variants }}
linux-cross-compile: ${{ steps.include.outputs.linux-cross-compile }}
alpine-linux-x64: ${{ steps.include.outputs.alpine-linux-x64 }}
@ -144,6 +145,7 @@ jobs:
}
echo "linux-x64=$(check_platform linux-x64 linux x64)" >> $GITHUB_OUTPUT
echo "linux-x86-hs=$(check_platform linux-x86-hs linux x86)" >> $GITHUB_OUTPUT
echo "linux-x64-variants=$(check_platform linux-x64-variants variants)" >> $GITHUB_OUTPUT
echo "linux-cross-compile=$(check_platform linux-cross-compile cross-compile)" >> $GITHUB_OUTPUT
echo "alpine-linux-x64=$(check_platform alpine-linux-x64 alpine-linux x64)" >> $GITHUB_OUTPUT
@ -168,6 +170,24 @@ jobs:
make-arguments: ${{ github.event.inputs.make-arguments }}
if: needs.prepare.outputs.linux-x64 == 'true'
build-linux-x86-hs:
name: linux-x86-hs
needs: prepare
uses: ./.github/workflows/build-linux.yml
with:
platform: linux-x86
make-target: 'hotspot'
gcc-major-version: '10'
gcc-package-suffix: '-multilib'
apt-architecture: 'i386'
# Some multilib libraries do not have proper inter-dependencies, so we have to
# install their dependencies manually.
apt-extra-packages: 'libfreetype-dev:i386 libtiff-dev:i386 libcupsimage2-dev:i386 libffi-dev:i386'
extra-conf-options: '--with-target-bits=32 --enable-fallback-linker --enable-libffi-bundling'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
if: needs.prepare.outputs.linux-x86-hs == 'true'
build-linux-x64-hs-nopch:
name: linux-x64-hs-nopch
needs: prepare

View File

@ -329,8 +329,8 @@ GB of free disk space is required.</p>
<p>Even for 32-bit builds, it is recommended to use a 64-bit build
machine, and instead create a 32-bit target using
<code>--with-target-bits=32</code>.</p>
<p>Note: The 32-bit x86 port is deprecated and may be removed in a
future release.</p>
<p>Note: The Windows 32-bit x86 port is deprecated and may be removed in
a future release.</p>
<h3 id="building-on-aarch64">Building on aarch64</h3>
<p>At a minimum, a machine with 8 cores is advisable, as well as 8 GB of
RAM. (The more cores to use, the more memory you need.) At least 6 GB of
@ -393,7 +393,8 @@ Build Platforms</a>. From time to time, this is updated by contributors
to list successes or failures of building on different platforms.</p>
<h3 id="windows">Windows</h3>
<p>Windows XP is not a supported platform, but all newer Windows should
be able to build the JDK.</p>
be able to build the JDK. (Note: The Windows 32-bit x86 port is
deprecated and may be removed in a future release.)</p>
<p>On Windows, it is important that you pay attention to the
instructions in the <a href="#special-considerations">Special
Considerations</a>.</p>

View File

@ -134,7 +134,8 @@ space is required.
Even for 32-bit builds, it is recommended to use a 64-bit build machine, and
instead create a 32-bit target using `--with-target-bits=32`.
Note: The 32-bit x86 port is deprecated and may be removed in a future release.
Note: The Windows 32-bit x86 port is deprecated and may be removed in a future
release.
### Building on aarch64
@ -190,7 +191,8 @@ on different platforms.
### Windows
Windows XP is not a supported platform, but all newer Windows should be able to
build the JDK.
build the JDK. (Note: The Windows 32-bit x86 port is deprecated and may be
removed in a future release.)
On Windows, it is important that you pay attention to the instructions in the
[Special Considerations](#special-considerations).

View File

@ -1344,13 +1344,9 @@ test-hotspot-jtreg-native: test-hotspot_native_sanity
test-hotspot-gtest: exploded-test-gtest
test-jdk-jtreg-native: test-jdk_native_sanity
# Set dependencies for doc tests
$(eval $(call AddTestDependency, docs_all, docs-jdk))
test-docs: test-docs_all
ALL_TARGETS += $(RUN_TEST_TARGETS) run-test exploded-run-test check \
test-hotspot-jtreg test-hotspot-jtreg-native test-hotspot-gtest \
test-jdk-jtreg-native test-docs
test-jdk-jtreg-native
################################################################################
################################################################################

View File

@ -137,15 +137,6 @@ define CleanModule
$(call Clean-include, $1)
endef
define AddTestDependency
test-$(strip $1): $2
exploded-test-$(strip $1): $2
ifneq ($(filter $(TEST), $1), )
TEST_DEPS += $2
endif
endef
################################################################################

View File

@ -666,14 +666,14 @@ AC_DEFUN([PLATFORM_CHECK_DEPRECATION],
[
AC_ARG_ENABLE(deprecated-ports, [AS_HELP_STRING([--enable-deprecated-ports@<:@=yes/no@:>@],
[Suppress the error when configuring for a deprecated port @<:@no@:>@])])
if test "x$OPENJDK_TARGET_CPU" = xx86; then
if test "x$enable_deprecated_ports" = "xyes"; then
AC_MSG_WARN([The 32-bit x86 port is deprecated and may be removed in a future release.])
else
AC_MSG_ERROR(m4_normalize([The 32-bit x86 port is deprecated and may be removed in a future release.
Use --enable-deprecated-ports=yes to suppress this error.]))
fi
fi
# if test "x$OPENJDK_TARGET_CPU" = xx86; then
# if test "x$enable_deprecated_ports" = "xyes"; then
# AC_MSG_WARN([The x86 port is deprecated and may be removed in a future release.])
# else
# AC_MSG_ERROR(m4_normalize([The 32-bit x86 port is deprecated and may be removed in a future release.
# Use --enable-deprecated-ports=yes to suppress this error.]))
# fi
# fi
])
AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_BUILD_OS_VERSION],

View File

@ -37,10 +37,6 @@ ifeq ($(TOOLCHAIN_TYPE), gcc)
# Need extra inlining to collapse shared marking code into the hot marking loop
BUILD_LIBJVM_shenandoahMark.cpp_CXXFLAGS := --param inline-unit-growth=1000
endif
# disable lto in g1ParScanThreadState because of special inlining/flattening used there
ifeq ($(call check-jvm-feature, link-time-opt), true)
BUILD_LIBJVM_g1ParScanThreadState.cpp_CXXFLAGS := -fno-lto
endif
endif
LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NONE)

View File

@ -46,6 +46,18 @@ EXCLUDE_FILES += \
javax/swing/plaf/nimbus/SpinnerPainter.java \
javax/swing/plaf/nimbus/SplitPanePainter.java \
javax/swing/plaf/nimbus/TabbedPanePainter.java \
sun/awt/resources/security-icon-bw16.png \
sun/awt/resources/security-icon-bw24.png \
sun/awt/resources/security-icon-bw32.png \
sun/awt/resources/security-icon-bw48.png \
sun/awt/resources/security-icon-interim16.png \
sun/awt/resources/security-icon-interim24.png \
sun/awt/resources/security-icon-interim32.png \
sun/awt/resources/security-icon-interim48.png \
sun/awt/resources/security-icon-yellow16.png \
sun/awt/resources/security-icon-yellow24.png \
sun/awt/resources/security-icon-yellow32.png \
sun/awt/resources/security-icon-yellow48.png \
sun/awt/X11/java-icon16.png \
sun/awt/X11/java-icon24.png \
sun/awt/X11/java-icon32.png \

View File

@ -37,6 +37,23 @@ GENSRC_AWT_ICONS_SRC += \
$(X11_ICONS_PATH_PREFIX)/classes/sun/awt/X11/java-icon32.png \
$(X11_ICONS_PATH_PREFIX)/classes/sun/awt/X11/java-icon48.png
AWT_ICONPATH := $(MODULE_SRC)/share/classes/sun/awt/resources
GENSRC_AWT_ICONS_SRC += \
$(AWT_ICONPATH)/security-icon-bw16.png \
$(AWT_ICONPATH)/security-icon-interim16.png \
$(AWT_ICONPATH)/security-icon-yellow16.png \
$(AWT_ICONPATH)/security-icon-bw24.png \
$(AWT_ICONPATH)/security-icon-interim24.png \
$(AWT_ICONPATH)/security-icon-yellow24.png \
$(AWT_ICONPATH)/security-icon-bw32.png \
$(AWT_ICONPATH)/security-icon-interim32.png \
$(AWT_ICONPATH)/security-icon-yellow32.png \
$(AWT_ICONPATH)/security-icon-bw48.png \
$(AWT_ICONPATH)/security-icon-interim48.png \
$(AWT_ICONPATH)/security-icon-yellow48.png
GENSRC_AWT_ICONS_FILES := $(notdir $(GENSRC_AWT_ICONS_SRC))
GENSRC_AWT_ICONS_SHORT_NAME = $(subst .,_,$(subst -,_,$(1)))

View File

@ -1114,7 +1114,13 @@ public final class FontPanel extends JPanel implements AdjustmentListener {
/// Position and set size of zoom window as needed
zoomWindow.setLocation( canvasLoc.x + zoomAreaX, canvasLoc.y + zoomAreaY );
if ( !nowZooming ) {
zoomWindow.setSize( zoomAreaWidth + 1, zoomAreaHeight + 1 );
if ( zoomWindow.getWarningString() != null )
/// If this is not opened as a "secure" window,
/// it has a banner below the zoom dialog which makes it look really BAD
/// So enlarge it by a bit
zoomWindow.setSize( zoomAreaWidth + 1, zoomAreaHeight + 20 );
else
zoomWindow.setSize( zoomAreaWidth + 1, zoomAreaHeight + 1 );
}
/// Prepare zoomed image

View File

@ -2632,23 +2632,6 @@ bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
// Loads and stores with indirect memory input (e.g., volatile loads and
// stores) do not subsume the input into complex addressing expressions. If
// the addressing expression is input to at least one such load or store, do
// not clone the addressing expression. Query needs_acquiring_load and
// needs_releasing_store as a proxy for indirect memory input, as it is not
// possible to directly query for indirect memory input at this stage.
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
Node* n = m->fast_out(i);
if (n->is_Load() && needs_acquiring_load(n)) {
return false;
}
if (n->is_Store() && needs_releasing_store(n)) {
return false;
}
}
if (clone_base_plus_offset_address(m, mstack, address_visited)) {
return true;
}

View File

@ -66,7 +66,6 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 8);
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);

View File

@ -620,7 +620,7 @@ address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter()
// Restore Java expression stack pointer
__ ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
__ lea(esp, Address(rfp, rscratch1, Address::lsl(Interpreter::logStackElementSize)));
// and null it as marker that esp is now tos until next java call
// and NULL it as marker that esp is now tos until next java call
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
// Restore machine SP

View File

@ -64,7 +64,6 @@ define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, true);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
define_pd_global(bool, IdealizeClearArrayNode, true);
#ifdef _LP64

View File

@ -59,7 +59,6 @@ define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.

View File

@ -2339,6 +2339,83 @@ void C2_MacroAssembler::signum_fp_v(VectorRegister dst, VectorRegister one, Basi
vfsgnj_vv(dst, one, dst, v0_t);
}
void C2_MacroAssembler::compress_bits_v(Register dst, Register src, Register mask, bool is_long) {
Assembler::SEW sew = is_long ? Assembler::e64 : Assembler::e32;
// intrinsic is enabled when MaxVectorSize >= 16
Assembler::LMUL lmul = is_long ? Assembler::m4 : Assembler::m2;
long len = is_long ? 64 : 32;
// load the src data(in bits) to be compressed.
vsetivli(x0, 1, sew, Assembler::m1);
vmv_s_x(v0, src);
// reset the src data(in bytes) to zero.
mv(t0, len);
vsetvli(x0, t0, Assembler::e8, lmul);
vmv_v_i(v4, 0);
// convert the src data from bits to bytes.
vmerge_vim(v4, v4, 1); // v0 as the implicit mask register
// reset the dst data(in bytes) to zero.
vmv_v_i(v8, 0);
// load the mask data(in bits).
vsetivli(x0, 1, sew, Assembler::m1);
vmv_s_x(v0, mask);
// compress the src data(in bytes) to dst(in bytes).
vsetvli(x0, t0, Assembler::e8, lmul);
vcompress_vm(v8, v4, v0);
// convert the dst data from bytes to bits.
vmseq_vi(v0, v8, 1);
// store result back.
vsetivli(x0, 1, sew, Assembler::m1);
vmv_x_s(dst, v0);
}
void C2_MacroAssembler::compress_bits_i_v(Register dst, Register src, Register mask) {
compress_bits_v(dst, src, mask, /* is_long */ false);
}
void C2_MacroAssembler::compress_bits_l_v(Register dst, Register src, Register mask) {
compress_bits_v(dst, src, mask, /* is_long */ true);
}
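The routine above reproduces the semantics of j.l.Integer/Long::compress: each src bit selected by a set mask bit moves to the next lower-order position of the result. A minimal scalar sketch of those semantics (hypothetical helper, not JDK code):

#include <cstdint>

// Scalar reference for bit-compress (cf. j.l.Long::compress): bits of src
// sitting under set mask bits are packed contiguously at the low end.
uint64_t compress_bits_ref(uint64_t src, uint64_t mask) {
  uint64_t result = 0;
  int out = 0;
  for (int i = 0; i < 64; i++) {
    if ((mask >> i) & 1) {
      result |= ((src >> i) & 1) << out++;
    }
  }
  return result;
}

The vector code performs the same packing in parallel: it widens src and mask from bits to bytes, lets vcompress.vm gather the selected bytes, then narrows back to bits with vmseq.vi.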
void C2_MacroAssembler::expand_bits_v(Register dst, Register src, Register mask, bool is_long) {
Assembler::SEW sew = is_long ? Assembler::e64 : Assembler::e32;
// intrinsic is enabled when MaxVectorSize >= 16
Assembler::LMUL lmul = is_long ? Assembler::m4 : Assembler::m2;
long len = is_long ? 64 : 32;
// load the src data(in bits) to be expanded.
vsetivli(x0, 1, sew, Assembler::m1);
vmv_s_x(v0, src);
// reset the src data(in bytes) to zero.
mv(t0, len);
vsetvli(x0, t0, Assembler::e8, lmul);
vmv_v_i(v4, 0);
// convert the src data from bits to bytes.
vmerge_vim(v4, v4, 1); // v0 as implicit mask register
// reset the dst data(in bytes) to zero.
vmv_v_i(v12, 0);
// load the mask data(in bits).
vsetivli(x0, 1, sew, Assembler::m1);
vmv_s_x(v0, mask);
// expand the src data(in bytes) to dst(in bytes).
vsetvli(x0, t0, Assembler::e8, lmul);
viota_m(v8, v0);
vrgather_vv(v12, v4, v8, VectorMask::v0_t); // v0 as implicit mask register
// convert the dst data from bytes to bits.
vmseq_vi(v0, v12, 1);
// store result back.
vsetivli(x0, 1, sew, Assembler::m1);
vmv_x_s(dst, v0);
}
void C2_MacroAssembler::expand_bits_i_v(Register dst, Register src, Register mask) {
expand_bits_v(dst, src, mask, /* is_long */ false);
}
void C2_MacroAssembler::expand_bits_l_v(Register dst, Register src, Register mask) {
expand_bits_v(dst, src, mask, /* is_long */ true);
}
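Expand is the inverse mapping, which is what the viota.m/vrgather.vv pair computes: the i-th set bit of mask receives the i-th low-order bit of src. A scalar sketch (hypothetical helper, not JDK code):

#include <cstdint>

// Scalar reference for bit-expand (cf. j.l.Long::expand).
uint64_t expand_bits_ref(uint64_t src, uint64_t mask) {
  uint64_t result = 0;
  int in = 0;
  for (int i = 0; i < 64; i++) {
    if ((mask >> i) & 1) {
      result |= ((src >> in++) & 1) << i;
    }
  }
  return result;
}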
// j.l.Math.round(float)
// Returns the closest int to the argument, with ties rounding to positive infinity.
// We need to handle 3 special cases defined by java api spec:
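Per the Java API spec, the three cases are NaN (rounds to 0) and saturation at Integer.MIN_VALUE/MAX_VALUE. A scalar sketch of the required behavior (hypothetical helper, not the JDK routine):

#include <cmath>
#include <cstdint>
#include <limits>

// java.lang.Math.round(float) semantics: floor(x + 0.5) with NaN -> 0 and
// saturation at the int range; ties round toward positive infinity.
int32_t java_round_ref(float x) {
  if (std::isnan(x)) return 0;
  double r = std::floor((double)x + 0.5);
  if (r <= (double)std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
  if (r >= (double)std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
  return (int32_t)r;
}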

View File

@ -39,6 +39,9 @@
VectorRegister vrs,
bool is_latin, Label& DONE, Assembler::LMUL lmul);
void compress_bits_v(Register dst, Register src, Register mask, bool is_long);
void expand_bits_v(Register dst, Register src, Register mask, bool is_long);
public:
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
void fast_lock(Register object, Register box,
@ -181,6 +184,13 @@
// intrinsic methods implemented by rvv instructions
// compress bits, i.e. j.l.Integer/Long::compress.
void compress_bits_i_v(Register dst, Register src, Register mask);
void compress_bits_l_v(Register dst, Register src, Register mask);
// expand bits, i.e. j.l.Integer/Long::expand.
void expand_bits_i_v(Register dst, Register src, Register mask);
void expand_bits_l_v(Register dst, Register src, Register mask);
void java_round_float_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);
void java_round_double_v(VectorRegister dst, VectorRegister src, FloatRegister ftmp, BasicType bt, uint vector_length);

View File

@ -66,7 +66,6 @@ define_pd_global(bool, OptoScheduling, true);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);

View File

@ -179,10 +179,15 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
lbu(t1, Address(xbcp, bcp_offset));
lbu(reg, Address(xbcp, bcp_offset + 1));
slli(t1, t1, 8);
add(reg, reg, t1);
if (AvoidUnalignedAccesses && (bcp_offset % 2)) {
lbu(t1, Address(xbcp, bcp_offset));
lbu(reg, Address(xbcp, bcp_offset + 1));
slli(t1, t1, 8);
add(reg, reg, t1);
} else {
lhu(reg, Address(xbcp, bcp_offset));
revb_h_h_u(reg, reg);
}
}
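Both branches assemble the same big-endian 16-bit operand from the bytecode stream; the AvoidUnalignedAccesses path composes it from two byte loads because xbcp plus an odd offset may not be halfword-aligned. A scalar equivalent of what either path computes (hypothetical helper):

#include <cstdint>

// Big-endian u2 at an arbitrarily aligned bytecode pointer. Byte loads are
// alignment-safe everywhere; lhu + byte-reverse is the fused alternative
// for cores without a misalignment penalty.
uint32_t read_be_u2(const uint8_t* bcp) {
  return ((uint32_t)bcp[0] << 8) | (uint32_t)bcp[1];
}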
void InterpreterMacroAssembler::get_dispatch() {
@ -195,7 +200,15 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
if (index_size == sizeof(u2)) {
load_short_misaligned(index, Address(xbcp, bcp_offset), tmp, false);
if (AvoidUnalignedAccesses) {
assert_different_registers(index, tmp);
load_unsigned_byte(index, Address(xbcp, bcp_offset));
load_unsigned_byte(tmp, Address(xbcp, bcp_offset + 1));
slli(tmp, tmp, 8);
add(index, index, tmp);
} else {
load_unsigned_short(index, Address(xbcp, bcp_offset));
}
} else if (index_size == sizeof(u4)) {
load_int_misaligned(index, Address(xbcp, bcp_offset), tmp, false);
} else if (index_size == sizeof(u1)) {
@ -428,14 +441,7 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
Register Rs) {
// Pay attention to the argument Rs, which is expected to be t0.
if (VerifyActivationFrameSize) {
Label L;
sub(t1, fp, esp);
int min_frame_size =
(frame::link_offset - frame::interpreter_frame_initial_sp_offset + frame::metadata_words) * wordSize;
sub(t1, t1, min_frame_size);
bgez(t1, L);
stop("broken stack frame");
bind(L);
Unimplemented();
}
if (verifyoop && state == atos) {
verify_oop(x10);

View File

@ -3313,11 +3313,14 @@ void MacroAssembler::store_conditional(Register dst,
}
void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expected,
Register new_val,
enum operand_size size,
Register shift, Register mask, Register aligned_addr) {
Register tmp1, Register tmp2, Register tmp3) {
assert(size == int8 || size == int16, "unsupported operand size");
Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3;
andi(shift, addr, 3);
slli(shift, shift, 3);
@ -3332,6 +3335,8 @@ void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expecte
}
sll(mask, mask, shift);
notr(not_mask, mask);
sll(expected, expected, shift);
andr(expected, expected, mask);
@ -3348,46 +3353,35 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, bool result_as_bool,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1);
Register scratch0 = t0, aligned_addr = t1;
Register shift = tmp1, mask = tmp2, scratch1 = tmp3;
cmpxchg_narrow_value_helper(addr, expected, new_val, size, shift, mask, aligned_addr);
Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3, old = result, tmp = t0;
assert_different_registers(addr, old, mask, not_mask, new_val, expected, shift, tmp);
cmpxchg_narrow_value_helper(addr, expected, new_val, size, tmp1, tmp2, tmp3);
Label retry, fail, done;
bind(retry);
if (UseZacas) {
lw(result, aligned_addr);
lw(old, aligned_addr);
bind(retry); // amocas loads the current value into result
notr(scratch1, mask);
// if old & mask != expected
andr(tmp, old, mask);
bne(tmp, expected, fail);
andr(scratch0, result, scratch1); // scratch0 = word - cas bits
orr(scratch1, expected, scratch0); // scratch1 = non-cas bits + cas bits
bne(result, scratch1, fail); // cas bits differ, cas failed
andr(tmp, old, not_mask);
orr(tmp, tmp, new_val);
// result is the same as expected, use as expected value.
// scratch0 is still = word - cas bits
// Or in the new value to create complete new value.
orr(scratch0, scratch0, new_val);
mv(scratch1, result); // save our expected value
atomic_cas(result, scratch0, aligned_addr, operand_size::int32, acquire, release);
bne(scratch1, result, retry);
atomic_cas(old, tmp, aligned_addr, operand_size::int32, acquire, release);
bne(tmp, old, retry);
} else {
notr(scratch1, mask);
bind(retry);
lr_w(old, aligned_addr, acquire);
andr(tmp, old, mask);
bne(tmp, expected, fail);
lr_w(result, aligned_addr, acquire);
andr(scratch0, result, mask);
bne(scratch0, expected, fail);
andr(scratch0, result, scratch1); // scratch1 is ~mask
orr(scratch0, scratch0, new_val);
sc_w(scratch0, scratch0, aligned_addr, release);
bnez(scratch0, retry);
andr(tmp, old, not_mask);
orr(tmp, tmp, new_val);
sc_w(tmp, tmp, aligned_addr, release);
bnez(tmp, retry);
}
if (result_as_bool) {
@ -3399,10 +3393,10 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
bind(done);
} else {
bind(fail);
andr(tmp, old, mask);
andr(scratch0, result, mask);
srl(result, scratch0, shift);
bind(fail);
srl(result, tmp, shift);
if (size == int8) {
sign_extend(result, result, 8);
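The narrow-value routines emulate a 1- or 2-byte CAS with an aligned 32-bit LR/SC or amocas plus the shift/mask produced by the helper. A compact sketch of the same scheme using GCC atomic builtins (hypothetical, assumes little-endian as on RISC-V):

#include <cstdint>

// Emulate a single-byte CAS with an aligned 4-byte CAS, mirroring the
// shift/mask scheme of cmpxchg_narrow_value_helper.
bool cas_byte(uint8_t* addr, uint8_t expected, uint8_t new_val) {
  uintptr_t a = (uintptr_t)addr;
  uint32_t* aligned = (uint32_t*)(a & ~(uintptr_t)3);
  unsigned shift = (unsigned)(a & 3) * 8;                 // byte position in the word
  uint32_t mask = 0xFFu << shift;
  uint32_t old = __atomic_load_n(aligned, __ATOMIC_RELAXED);
  while ((old & mask) == ((uint32_t)expected << shift)) { // cas bits still match?
    uint32_t desired = (old & ~mask) | ((uint32_t)new_val << shift);
    if (__atomic_compare_exchange_n(aligned, &old, desired, /*weak=*/false,
                                    __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
      return true;                                        // neighbor bytes preserved
    }
    // on failure 'old' is reloaded; the loop exits if our byte no longer matches
  }
  return false;
}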
@ -3422,44 +3416,33 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result,
Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1);
Register scratch0 = t0, aligned_addr = t1;
Register shift = tmp1, mask = tmp2, scratch1 = tmp3;
cmpxchg_narrow_value_helper(addr, expected, new_val, size, shift, mask, aligned_addr);
Register aligned_addr = t1, shift = tmp1, mask = tmp2, not_mask = tmp3, old = result, tmp = t0;
assert_different_registers(addr, old, mask, not_mask, new_val, expected, shift, tmp);
cmpxchg_narrow_value_helper(addr, expected, new_val, size, tmp1, tmp2, tmp3);
Label fail, done;
if (UseZacas) {
lw(result, aligned_addr);
lw(old, aligned_addr);
notr(scratch1, mask);
// if old & mask != expected
andr(tmp, old, mask);
bne(tmp, expected, fail);
andr(scratch0, result, scratch1); // scratch0 = word - cas bits
orr(scratch1, expected, scratch0); // scratch1 = non-cas bits + cas bits
bne(result, scratch1, fail); // cas bits differ, cas failed
andr(tmp, old, not_mask);
orr(tmp, tmp, new_val);
// result is the same as expected, use as expected value.
// scratch0 is still = word - cas bits
// Or in the new value to create complete new value.
orr(scratch0, scratch0, new_val);
mv(scratch1, result); // save our expected value
atomic_cas(result, scratch0, aligned_addr, operand_size::int32, acquire, release);
bne(scratch1, result, fail); // This weak, so just bail-out.
atomic_cas(tmp, new_val, addr, operand_size::int32, acquire, release);
bne(tmp, old, fail);
} else {
notr(scratch1, mask);
lr_w(old, aligned_addr, acquire);
andr(tmp, old, mask);
bne(tmp, expected, fail);
lr_w(result, aligned_addr, acquire);
andr(scratch0, result, mask);
bne(scratch0, expected, fail);
andr(scratch0, result, scratch1); // scratch1 is ~mask
orr(scratch0, scratch0, new_val);
sc_w(scratch0, scratch0, aligned_addr, release);
bnez(scratch0, fail);
andr(tmp, old, not_mask);
orr(tmp, tmp, new_val);
sc_w(tmp, tmp, aligned_addr, release);
bnez(tmp, fail);
}
// Success
@ -3483,17 +3466,6 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
assert_different_registers(expected, t0);
assert_different_registers(new_val, t0);
// NOTE:
// Register _result_ may be the same register as _new_val_ or _expected_.
// Hence do NOT use _result_ until after 'cas'.
//
// Register _expected_ may be the same register as _new_val_ and is assumed to be preserved.
// Hence do NOT change _expected_ or _new_val_.
//
// Having _expected_ and _new_val_ being the same register is a very puzzling cas.
//
// TODO: Address these issues.
if (UseZacas) {
if (result_as_bool) {
mv(t0, expected);
@ -3501,9 +3473,8 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
xorr(t0, t0, expected);
seqz(result, t0);
} else {
mv(t0, expected);
atomic_cas(t0, new_val, addr, size, acquire, release);
mv(result, t0);
mv(result, expected);
atomic_cas(result, new_val, addr, size, acquire, release);
}
return;
}
@ -3539,16 +3510,15 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected,
enum operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result) {
assert_different_registers(addr, t0);
assert_different_registers(expected, t0);
assert_different_registers(new_val, t0);
if (UseZacas) {
cmpxchg(addr, expected, new_val, size, acquire, release, result, true);
return;
}
assert_different_registers(addr, t0);
assert_different_registers(expected, t0);
assert_different_registers(new_val, t0);
Label fail, done;
load_reserved(t0, addr, size, acquire);
bne(t0, expected, fail);
@ -3611,18 +3581,83 @@ ATOMIC_XCHGU(xchgalwu, xchgalw)
#undef ATOMIC_XCHGU
void MacroAssembler::atomic_cas(Register prev, Register newv, Register addr,
enum operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release) {
#define ATOMIC_CAS(OP, AOP, ACQUIRE, RELEASE) \
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
assert(UseZacas, "invariant"); \
prev = prev->is_valid() ? prev : zr; \
AOP(prev, addr, newv, (Assembler::Aqrl)(ACQUIRE | RELEASE)); \
return; \
}
ATOMIC_CAS(cas, amocas_d, Assembler::relaxed, Assembler::relaxed)
ATOMIC_CAS(casw, amocas_w, Assembler::relaxed, Assembler::relaxed)
ATOMIC_CAS(casl, amocas_d, Assembler::relaxed, Assembler::rl)
ATOMIC_CAS(caslw, amocas_w, Assembler::relaxed, Assembler::rl)
ATOMIC_CAS(casal, amocas_d, Assembler::aq, Assembler::rl)
ATOMIC_CAS(casalw, amocas_w, Assembler::aq, Assembler::rl)
#undef ATOMIC_CAS
#define ATOMIC_CASU(OP1, OP2) \
void MacroAssembler::atomic_##OP1(Register prev, Register newv, Register addr) { \
atomic_##OP2(prev, newv, addr); \
zero_extend(prev, prev, 32); \
return; \
}
ATOMIC_CASU(caswu, casw)
ATOMIC_CASU(caslwu, caslw)
ATOMIC_CASU(casalwu, casalw)
#undef ATOMIC_CASU
void MacroAssembler::atomic_cas(
Register prev, Register newv, Register addr, enum operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release) {
switch (size) {
case int64:
amocas_d(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
switch ((Assembler::Aqrl)(acquire | release)) {
case Assembler::relaxed:
atomic_cas(prev, newv, addr);
break;
case Assembler::rl:
atomic_casl(prev, newv, addr);
break;
case Assembler::aqrl:
atomic_casal(prev, newv, addr);
break;
default:
ShouldNotReachHere();
}
break;
case int32:
amocas_w(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
switch ((Assembler::Aqrl)(acquire | release)) {
case Assembler::relaxed:
atomic_casw(prev, newv, addr);
break;
case Assembler::rl:
atomic_caslw(prev, newv, addr);
break;
case Assembler::aqrl:
atomic_casalw(prev, newv, addr);
break;
default:
ShouldNotReachHere();
}
break;
case uint32:
amocas_w(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
zero_extend(prev, prev, 32);
switch ((Assembler::Aqrl)(acquire | release)) {
case Assembler::relaxed:
atomic_caswu(prev, newv, addr);
break;
case Assembler::rl:
atomic_caslwu(prev, newv, addr);
break;
case Assembler::aqrl:
atomic_casalwu(prev, newv, addr);
break;
default:
ShouldNotReachHere();
}
break;
default:
ShouldNotReachHere();

View File

@ -1146,9 +1146,10 @@ public:
enum operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result);
void cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
void cmpxchg_narrow_value_helper(Register addr, Register expected,
Register new_val,
enum operand_size size,
Register shift, Register mask, Register aligned_addr);
Register tmp1, Register tmp2, Register tmp3);
void cmpxchg_narrow_value(Register addr, Register expected,
Register new_val,
enum operand_size size,
@ -1174,6 +1175,16 @@ public:
void atomic_xchgwu(Register prev, Register newv, Register addr);
void atomic_xchgalwu(Register prev, Register newv, Register addr);
void atomic_cas(Register prev, Register newv, Register addr);
void atomic_casw(Register prev, Register newv, Register addr);
void atomic_casl(Register prev, Register newv, Register addr);
void atomic_caslw(Register prev, Register newv, Register addr);
void atomic_casal(Register prev, Register newv, Register addr);
void atomic_casalw(Register prev, Register newv, Register addr);
void atomic_caswu(Register prev, Register newv, Register addr);
void atomic_caslwu(Register prev, Register newv, Register addr);
void atomic_casalwu(Register prev, Register newv, Register addr);
void atomic_cas(Register prev, Register newv, Register addr, enum operand_size size,
Assembler::Aqrl acquire = Assembler::relaxed, Assembler::Aqrl release = Assembler::relaxed);

View File

@ -942,6 +942,26 @@ reg_class v11_reg(
V11, V11_H, V11_J, V11_K
);
// class for vector register v12
reg_class v12_reg(
V12, V12_H, V12_J, V12_K
);
// class for vector register v13
reg_class v13_reg(
V13, V13_H, V13_J, V13_K
);
// class for vector register v14
reg_class v14_reg(
V14, V14_H, V14_J, V14_K
);
// class for vector register v15
reg_class v15_reg(
V15, V15_H, V15_J, V15_K
);
// class for condition codes
reg_class reg_flags(RFLAGS);
@ -1876,6 +1896,9 @@ bool Matcher::match_rule_supported(int opcode) {
}
break;
case Op_ExpandBits: // fall through
case Op_CompressBits: // fall through
guarantee(UseRVV == (MaxVectorSize >= 16), "UseRVV and MaxVectorSize not matched");
case Op_StrCompressedCopy: // fall through
case Op_StrInflatedCopy: // fall through
case Op_CountPositives: // fall through
@ -3518,6 +3541,46 @@ operand vReg_V11()
interface(REG_INTER);
%}
operand vReg_V12()
%{
constraint(ALLOC_IN_RC(v12_reg));
match(VecA);
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V13()
%{
constraint(ALLOC_IN_RC(v13_reg));
match(VecA);
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V14()
%{
constraint(ALLOC_IN_RC(v14_reg));
match(VecA);
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vReg_V15()
%{
constraint(ALLOC_IN_RC(v15_reg));
match(VecA);
match(vReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegMask()
%{
constraint(ALLOC_IN_RC(vmask_reg));

View File

@ -3843,6 +3843,116 @@ instruct vclearArray_reg_reg(iRegL_R29 cnt, iRegP_R28 base, Universe dummy,
ins_pipe(pipe_class_memory);
%}
// CompressBits of Long & Integer
instruct compressBitsI(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, vRegMask_V0 v0,
vReg_V4 v4, vReg_V5 v5, vReg_V8 v8, vReg_V9 v9) %{
match(Set dst (CompressBits src mask));
effect(TEMP v0, TEMP v4, TEMP v5, TEMP v8, TEMP v9);
format %{ "vsetivli x0, 1, e32, m1, tu, mu\t#@compressBitsI\n\t"
"vmv.s.x $v0, $src\n\t"
"mv t0, 32\n\t"
"vsetvli x0, t0, e8, m2, tu, mu\n\t"
"vmv.v.i $v4, 0\n\t"
"vmerge.vim $v4, $v4, 1, $v0\n\t"
"vmv.v.i $v8, 0\n\t"
"vsetivli x0, 1, e32, m1, tu, mu\n\t"
"vmv.s.x $v0, $mask\n\t"
"vsetvli x0, t0, e8, m2, tu, mu\n\t"
"vcompress.vm $v8, $v4, $v0\n\t"
"vmseq.vi $v0, $v8, 1\n\t"
"vsetivli x0, 1, e32, m1, tu, mu\n\t"
"vmv.x.s $dst, $v0\t#@compressBitsI\n\t"
%}
ins_encode %{
__ compress_bits_i_v(as_Register($dst$$reg), as_Register($src$$reg), as_Register($mask$$reg));
%}
ins_pipe(pipe_slow);
%}
instruct compressBitsL(iRegLNoSp dst, iRegL src, iRegL mask, vRegMask_V0 v0,
vReg_V4 v4, vReg_V5 v5, vReg_V6 v6, vReg_V7 v7,
vReg_V8 v8, vReg_V9 v9, vReg_V10 v10, vReg_V11 v11) %{
match(Set dst (CompressBits src mask));
effect(TEMP v0, TEMP v4, TEMP v5, TEMP v6, TEMP v7, TEMP v8, TEMP v9, TEMP v10, TEMP v11);
format %{ "vsetivli x0, 1, e64, m1, tu, mu\t#@compressBitsL\n\t"
"vmv.s.x $v0, $src\n\t"
"mv t0, 64\n\t"
"vsetvli x0, t0, e8, m4, tu, mu\n\t"
"vmv.v.i $v4, 0\n\t"
"vmerge.vim $v4, $v4, 1, $v0\n\t"
"vmv.v.i $v8, 0\n\t"
"vsetivli x0, 1, e64, m1, tu, mu\n\t"
"vmv.s.x $v0, $mask\n\t"
"vsetvli x0, t0, e8, m4, tu, mu\n\t"
"vcompress.vm $v8, $v4, $v0\n\t"
"vmseq.vi $v0, $v8, 1\n\t"
"vsetivli x0, 1, e64, m1, tu, mu\n\t"
"vmv.x.s $dst, $v0\t#@compressBitsL\n\t"
%}
ins_encode %{
__ compress_bits_l_v(as_Register($dst$$reg), as_Register($src$$reg), as_Register($mask$$reg));
%}
ins_pipe(pipe_slow);
%}
// ExpandBits of Long & Integer
instruct expandBitsI(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask, vRegMask_V0 v0,
vReg_V4 v4, vReg_V5 v5, vReg_V8 v8, vReg_V9 v9, vReg_V12 v12, vReg_V13 v13) %{
match(Set dst (ExpandBits src mask));
effect(TEMP v0, TEMP v4, TEMP v5, TEMP v8, TEMP v9, TEMP v12, TEMP v13);
format %{ "vsetivli x0, 1, e32, m1, tu, mu\t#@expandBitsI\n\t"
"vmv.s.x $v0, $src\n\t"
"mv t0, 32\n\t"
"vsetvli x0, t0, e8, m2, tu, mu\n\t"
"vmv.v.i $v4, 0\n\t"
"vmerge.vim $v4, $v4, 1, $v0\n\t"
"vmv.v.i $v12, 0\n\t"
"vsetivli x0, 1, e32, m1, tu, mu\n\t"
"vmv.s.x $v0, $mask\n\t"
"vsetvli x0, t0, e8, m2, tu, mu\n\t"
"viota.m $v8, $v0\n\t"
"vrgather.vv $v12, $v4, $v8, $v0.t\n\t"
"vmseq.vi $v0, $v12, 1\n\t"
"vsetivli x0, 1, e32, m1, tu, mu\n\t"
"vmv.x.s $dst, $v0\t#@expandBitsI\n\t"
%}
ins_encode %{
__ expand_bits_i_v(as_Register($dst$$reg), as_Register($src$$reg), as_Register($mask$$reg));
%}
ins_pipe(pipe_slow);
%}
instruct expandBitsL(iRegLNoSp dst, iRegL src, iRegL mask, vRegMask_V0 v0,
vReg_V4 v4, vReg_V5 v5, vReg_V6 v6, vReg_V7 v7,
vReg_V8 v8, vReg_V9 v9, vReg_V10 v10, vReg_V11 v11,
vReg_V12 v12, vReg_V13 v13, vReg_V14 v14, vReg_V15 v15) %{
match(Set dst (ExpandBits src mask));
effect(TEMP v0, TEMP v4, TEMP v5, TEMP v6, TEMP v7, TEMP v8, TEMP v9, TEMP v10, TEMP v11,
TEMP v12, TEMP v13, TEMP v14, TEMP v15);
format %{ "vsetivli x0, 1, e64, m1, tu, mu\t#@expandBitsL\n\t"
"vmv.s.x $v0, $src\n\t"
"mv t0, 64\n\t"
"vsetvli x0, t0, e8, m4, tu, mu\n\t"
"vmv.v.i $v4, 0\n\t"
"vmerge.vim $v4, $v4, 1, $v0\n\t"
"vmv.v.i $v12, 0\n\t"
"vsetivli x0, 1, e64, m1, tu, mu\n\t"
"vmv.s.x $v0, $mask\n\t"
"vsetvli x0, t0, e8, m4, tu, mu\n\t"
"viota.m $v8, $v0\n\t"
"vrgather.vv $v12, $v4, $v8, $v0.t\n\t"
"vmseq.vi $v0, $v12, 1\n\t"
"vsetivli x0, 1, e64, m1, tu, mu\n\t"
"vmv.x.s $dst, $v0\t#@expandBitsL\n\t"
%}
ins_encode %{
__ expand_bits_l_v(as_Register($dst$$reg), as_Register($src$$reg), as_Register($mask$$reg));
%}
ins_pipe(pipe_slow);
%}
// Vector Load Const
instruct vloadcon(vReg dst, immI0 src) %{
match(Set dst (VectorLoadConst src));

View File

@ -2112,7 +2112,7 @@ class StubGenerator: public StubCodeGenerator {
// Remaining count is less than 8 bytes. Fill it by a single store.
// Note that the total length is no less than 8 bytes.
if (!AvoidUnalignedAccesses && (t == T_BYTE || t == T_SHORT)) {
if (t == T_BYTE || t == T_SHORT) {
__ beqz(count, L_exit1);
__ shadd(to, count, to, tmp_reg, shift); // points to the end
__ sd(value, Address(to, -8)); // overwrite some elements
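The single store is safe because the total fill length is known to be at least 8 bytes: an 8-byte store ending exactly at the end of the region covers any sub-8-byte tail, merely rewriting a few bytes that were already filled. In scalar terms (hypothetical sketch, byte-granular for simplicity):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Overlapping-tail fill: 'to' points at the remaining tail, count < 8, and
// at least 8 filled-or-fillable bytes end at to + count.
void fill_tail(uint8_t* to, size_t count, uint64_t pattern) {
  if (count == 0) return;            // mirrors beqz(count, L_exit1)
  uint8_t* end = to + count;         // shadd: point past the last element
  std::memcpy(end - 8, &pattern, 8); // sd(value, Address(to, -8))
}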

View File

@ -552,7 +552,7 @@ address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter()
// Restore Java expression stack pointer
__ ld(t0, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
__ shadd(esp, t0, fp, t0, Interpreter::logStackElementSize);
// and null it as marker that esp is now tos until next java call
// and NULL it as marker that esp is now tos until next java call
__ sd(zr, Address(fp, frame::interpreter_frame_last_sp_offset * wordSize));
// Restore machine SP

View File

@ -292,10 +292,15 @@ void TemplateTable::bipush() {
void TemplateTable::sipush() {
transition(vtos, itos);
__ load_signed_byte(x10, at_bcp(1));
__ load_unsigned_byte(t1, at_bcp(2));
__ slli(x10, x10, 8);
__ add(x10, x10, t1);
if (AvoidUnalignedAccesses) {
__ load_signed_byte(x10, at_bcp(1));
__ load_unsigned_byte(t1, at_bcp(2));
__ slli(x10, x10, 8);
__ add(x10, x10, t1);
} else {
__ load_unsigned_short(x10, at_bcp(1));
__ revb_h_h(x10, x10); // reverse bytes in half-word and sign-extend
}
}
void TemplateTable::ldc(LdcType type) {
@ -1621,10 +1626,15 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// load branch displacement
if (!is_wide) {
__ lb(x12, at_bcp(1));
__ lbu(t1, at_bcp(2));
__ slli(x12, x12, 8);
__ add(x12, x12, t1);
if (AvoidUnalignedAccesses) {
__ lb(x12, at_bcp(1));
__ lbu(t1, at_bcp(2));
__ slli(x12, x12, 8);
__ add(x12, x12, t1);
} else {
__ lhu(x12, at_bcp(1));
__ revb_h_h(x12, x12); // reverse bytes in half-word and sign-extend
}
} else {
__ lwu(x12, at_bcp(1));
__ revb_w_w(x12, x12); // reverse bytes in word and sign-extend

View File

@ -61,7 +61,6 @@ define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoRegScheduling, false);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
// On s390x, we can clear the array with a single instruction,
// so don't idealize it.
define_pd_global(bool, IdealizeClearArrayNode, false);

View File

@ -76,7 +76,6 @@ define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, true);
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
define_pd_global(bool, IdealizeClearArrayNode, true);
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);

View File

@ -4912,10 +4912,6 @@ void MacroAssembler::population_count(Register dst, Register src,
}
bind(done);
}
#ifdef ASSERT
mov64(scratch1, 0xCafeBabeDeadBeef);
movq(scratch2, scratch1);
#endif
}
// Ensure that the inline code and the stub are using the same registers.
@ -5117,7 +5113,6 @@ void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
const Register r_array_base = *available_regs++;
// Get the first array index that can contain super_klass into r_array_index.
// Note: Clobbers r_array_base and slot.
population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot);
// NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word.
@ -5135,7 +5130,7 @@ void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass,
jccb(Assembler::equal, L_success);
// Restore slot to its true value
movb(slot, Address(r_super_klass, Klass::hash_slot_offset()));
xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64)
// Linear probe. Rotate the bitmap so that the next bit to test is
// in Bit 1.

View File

@ -400,7 +400,7 @@ address TemplateInterpreterGenerator::generate_cont_resume_interpreter_adapter()
// Restore stack bottom
__ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ lea(rsp, Address(rbp, rcx, Address::times_ptr));
// and null it as marker that esp is now tos until next java call
// and NULL it as marker that esp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ jmp(rax);

View File

@ -6179,7 +6179,6 @@ instruct evmulL_reg(vec dst, vec src1, vec src2) %{
VM_Version::supports_avx512dq()) ||
VM_Version::supports_avx512vldq());
match(Set dst (MulVL src1 src2));
ins_cost(500);
format %{ "evpmullq $dst,$src1,$src2\t! mul packedL" %}
ins_encode %{
assert(UseAVX > 2, "required");
@ -6196,7 +6195,6 @@ instruct evmulL_mem(vec dst, vec src, memory mem) %{
VM_Version::supports_avx512vldq()));
match(Set dst (MulVL src (LoadVector mem)));
format %{ "evpmullq $dst,$src,$mem\t! mul packedL" %}
ins_cost(500);
ins_encode %{
assert(UseAVX > 2, "required");
int vlen_enc = vector_length_encoding(this);
@ -6208,7 +6206,6 @@ instruct evmulL_mem(vec dst, vec src, memory mem) %{
instruct vmulL(vec dst, vec src1, vec src2, vec xtmp) %{
predicate(UseAVX == 0);
match(Set dst (MulVL src1 src2));
ins_cost(500);
effect(TEMP dst, TEMP xtmp);
format %{ "mulVL $dst, $src1, $src2\t! using $xtmp as TEMP" %}
ins_encode %{
@ -6235,7 +6232,6 @@ instruct vmulL_reg(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2) %{
!VM_Version::supports_avx512vldq())));
match(Set dst (MulVL src1 src2));
effect(TEMP xtmp1, TEMP xtmp2);
ins_cost(500);
format %{ "vmulVL $dst, $src1, $src2\t! using $xtmp1, $xtmp2 as TEMP" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
@ -6252,30 +6248,6 @@ instruct vmulL_reg(vec dst, vec src1, vec src2, vec xtmp1, vec xtmp2) %{
ins_pipe( pipe_slow );
%}
instruct vmuludq_reg(vec dst, vec src1, vec src2) %{
predicate(UseAVX > 0 && n->as_MulVL()->has_uint_inputs());
match(Set dst (MulVL src1 src2));
ins_cost(100);
format %{ "vpmuludq $dst,$src1,$src2\t! muludq packedL" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
__ vpmuludq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
instruct vmuldq_reg(vec dst, vec src1, vec src2) %{
predicate(UseAVX > 0 && n->as_MulVL()->has_int_inputs());
match(Set dst (MulVL src1 src2));
ins_cost(100);
format %{ "vpmuldq $dst,$src1,$src2\t! muldq packedL" %}
ins_encode %{
int vlen_enc = vector_length_encoding(this);
__ vpmuldq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
// Floats vector mul
instruct vmulF(vec dst, vec src) %{
predicate(UseAVX == 0);

View File

@ -229,6 +229,8 @@ size_t os::rss() {
// Cpu architecture string
#if defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
#elif defined(IA64)
static char cpu_arch[] = "ia64";
#elif defined(IA32)
static char cpu_arch[] = "i386";
#elif defined(AMD64)
@ -1190,6 +1192,8 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __powerpc64__)
static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
@ -1210,7 +1214,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_68K;
#else
#error Method os::dll_load requires that one of following is defined:\
IA32, AMD64, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
IA32, AMD64, IA64, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
#endif
// Identify compatibility class for VM's architecture and library's architecture

View File

@ -55,7 +55,8 @@ static size_t scan_default_hugepagesize() {
// large_page_size on Linux is used to round up heap size. x86 uses either
// 2M or 4M page, depending on whether PAE (Physical Address Extensions)
// mode is enabled. AMD64/EM64T uses 2M page in 64bit mode.
// mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
// page as large as 1G.
//
// Here we try to figure out page size by parsing /proc/meminfo and looking
// for a line with the following format:
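A minimal sketch of that scan, assuming the conventional "Hugepagesize: <n> kB" line (the exact format shown in the original comment is cut off by the diff):

#include <cstddef>
#include <cstdio>

// Parse the default huge page size from /proc/meminfo; 0 if not found.
size_t default_hugepage_size_bytes() {
  FILE* f = std::fopen("/proc/meminfo", "r");
  if (f == nullptr) return 0;
  char line[128];
  unsigned long kb = 0;
  while (std::fgets(line, sizeof(line), f) != nullptr) {
    if (std::sscanf(line, "Hugepagesize: %lu kB", &kb) == 1) break;
  }
  std::fclose(f);
  return (size_t)kb * 1024;
}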

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,6 +41,10 @@ public:
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = { "java.lang.management.ManagementPermission", "monitor", nullptr };
return p;
}
void execute(DCmdSource source, TRAPS) override;
};

View File

@ -461,17 +461,26 @@ bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu
}
#ifndef SYS_gettid
// i386: 224, amd64: 186, sparc: 143
#if defined(__i386__)
#define SYS_gettid 224
#elif defined(__amd64__)
#define SYS_gettid 186
#elif defined(__sparc__)
#define SYS_gettid 143
// i386: 224, ia64: 1105, amd64: 186, sparc: 143
#ifdef __ia64__
#define SYS_gettid 1105
#else
#error "Define SYS_gettid for this architecture"
#ifdef __i386__
#define SYS_gettid 224
#else
#ifdef __amd64__
#define SYS_gettid 186
#else
#ifdef __sparc__
#define SYS_gettid 143
#else
#error define gettid for the arch
#endif
#endif
#endif
#endif
#endif // SYS_gettid
#endif
// pid_t gettid()
//
@ -1769,6 +1778,8 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64) || (defined X32)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
@ -1801,7 +1812,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
#error Method os::dll_load requires that one of following is defined:\
AARCH64, ALPHA, ARM, AMD64, IA32, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
#endif
// Identify compatibility class for VM's architecture and library's architecture
@ -2708,6 +2719,8 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
strncpy(cpuinfo, "ARM", length);
#elif defined(IA32)
strncpy(cpuinfo, "x86_32", length);
#elif defined(IA64)
strncpy(cpuinfo, "IA64", length);
#elif defined(PPC)
strncpy(cpuinfo, "PPC64", length);
#elif defined(RISCV)

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021 SAP SE. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,6 +42,10 @@ public:
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = { "java.lang.management.ManagementPermission", "control", nullptr };
return p;
}
virtual void execute(DCmdSource source, TRAPS);
};

View File

@ -2111,7 +2111,7 @@ void os::shutdown() {
// easily trigger secondary faults in those threads. To reduce the likelihood
// of that we use _exit rather than exit, so that no atexit hooks get run.
// But note that os::shutdown() could also trigger secondary faults.
void os::abort(bool dump_core, const void* siginfo, const void* context) {
void os::abort(bool dump_core, void* siginfo, const void* context) {
os::shutdown();
if (dump_core) {
LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();)
@ -2186,43 +2186,3 @@ char* os::pd_map_memory(int fd, const char* unused,
bool os::pd_unmap_memory(char* addr, size_t bytes) {
return munmap(addr, bytes) == 0;
}
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
static ucontext_t _saved_assert_context;
static bool _has_saved_context = false;
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
void os::save_assert_context(const void* ucVoid) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
assert(ucVoid != nullptr, "invariant");
assert(!_has_saved_context, "invariant");
memcpy(&_saved_assert_context, ucVoid, sizeof(ucontext_t));
// on Linux ppc64, ucontext_t contains pointers into itself which have to be patched up
// after copying the context (see comment in sys/ucontext.h):
#if defined(PPC64)
*((void**)&_saved_assert_context.uc_mcontext.regs) = &(_saved_assert_context.uc_mcontext.gp_regs);
#elif defined(AMD64)
// In the copied version, fpregs should point to the copied contents.
// Sanity check: fpregs should point into the context.
if ((address)((const ucontext_t*)ucVoid)->uc_mcontext.fpregs > (address)ucVoid) {
size_t fpregs_offset = pointer_delta(((const ucontext_t*)ucVoid)->uc_mcontext.fpregs, ucVoid, 1);
if (fpregs_offset < sizeof(ucontext_t)) {
// Preserve the offset.
*((void**)&_saved_assert_context.uc_mcontext.fpregs) = (void*)((address)(void*)&_saved_assert_context + fpregs_offset);
}
}
#endif
_has_saved_context = true;
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
}
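The patch-up is needed because memcpy of a struct that points into itself leaves the copy pointing back at the source. A minimal illustration with a hypothetical stand-in struct:

#include <cstring>

// SelfRef is a hypothetical stand-in for ucontext_t: it holds a pointer
// into its own storage, so a byte-for-byte copy must re-aim that pointer.
struct SelfRef {
  long regs[8];
  long* current; // points somewhere inside this->regs
};

void copy_selfref(SelfRef* dst, const SelfRef* src) {
  std::memcpy(dst, src, sizeof(SelfRef));
  dst->current = dst->regs + (src->current - src->regs); // keep the offset, not the address
}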
const void* os::get_saved_assert_context(const void** sigInfo) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
assert(sigInfo != nullptr, "invariant");
*sigInfo = nullptr;
return _has_saved_context ? &_saved_assert_context : nullptr;
#endif
*sigInfo = nullptr;
return nullptr;
}

View File

@ -578,8 +578,9 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
// Handle assertion poison page accesses.
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (VMError::was_assert_poison_crash(info)) {
signal_was_handled = handle_assert_poison_fault(ucVoid);
if (!signal_was_handled &&
((sig == SIGSEGV || sig == SIGBUS) && info != nullptr && info->si_addr == g_assert_poison)) {
signal_was_handled = handle_assert_poison_fault(ucVoid, info->si_addr);
}
#endif
@ -960,6 +961,10 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
{ SIGILL, ILL_PRVREG, "ILL_PRVREG", "Privileged register." },
{ SIGILL, ILL_COPROC, "ILL_COPROC", "Coprocessor error." },
{ SIGILL, ILL_BADSTK, "ILL_BADSTK", "Internal stack error." },
#if defined(IA64) && defined(LINUX)
{ SIGILL, ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
{ SIGILL, ILL_BREAK, "ILL_BREAK", "Application Break instruction" },
#endif
{ SIGFPE, FPE_INTDIV, "FPE_INTDIV", "Integer divide by zero." },
{ SIGFPE, FPE_INTOVF, "FPE_INTOVF", "Integer overflow." },
{ SIGFPE, FPE_FLTDIV, "FPE_FLTDIV", "Floating-point divide by zero." },
@ -973,6 +978,9 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
#if defined(AIX)
// no explanation found what keyerr would be
{ SIGSEGV, SEGV_KEYERR, "SEGV_KEYERR", "key error" },
#endif
#if defined(IA64) && !defined(AIX)
{ SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
#endif
{ SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
{ SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
@ -1128,16 +1136,8 @@ static const char* get_signal_name(int sig, char* out, size_t outlen) {
}
void os::print_siginfo(outputStream* os, const void* si0) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
// If we are here because of an assert/guarantee, we suppress
// printing the siginfo, because it is only an implementation
// detail capturing the context for said assert/guarantee.
if (VMError::was_assert_poison_crash(si0)) {
return;
}
#endif
const siginfo_t* const si = (const siginfo_t*)si0;
const siginfo_t* const si = (const siginfo_t*) si0;
char buf[20];
os->print("siginfo:");

View File

@ -84,8 +84,8 @@ static void crash_handler(int sig, siginfo_t* info, void* context) {
// Needed because asserts may happen in error handling too.
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (VMError::was_assert_poison_crash(info)) {
if (handle_assert_poison_fault(context)) {
if ((sig == SIGSEGV || sig == SIGBUS) && info != nullptr && info->si_addr == g_assert_poison) {
if (handle_assert_poison_fault(context, info->si_addr)) {
return;
}
}
@ -127,14 +127,3 @@ void VMError::check_failing_cds_access(outputStream* st, const void* siginfo) {
}
#endif
}
bool VMError::was_assert_poison_crash(const void* siginfo) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (siginfo == nullptr) {
return false;
}
const siginfo_t* const si = (siginfo_t*)siginfo;
return (si->si_signo == SIGSEGV || si->si_signo == SIGBUS) && si->si_addr == g_assert_poison_read_only;
#endif
return false;
}

View File

@ -72,7 +72,6 @@
#include "services/runtimeService.hpp"
#include "symbolengine.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
@ -1318,7 +1317,7 @@ void os::check_core_dump_prerequisites(char* buffer, size_t bufferSize, bool che
}
}
void os::abort(bool dump_core, const void* siginfo, const void* context) {
void os::abort(bool dump_core, void* siginfo, const void* context) {
EXCEPTION_POINTERS ep;
MINIDUMP_EXCEPTION_INFORMATION mei;
MINIDUMP_EXCEPTION_INFORMATION* pmei;
@ -2113,17 +2112,7 @@ bool os::signal_sent_by_kill(const void* siginfo) {
}
void os::print_siginfo(outputStream *st, const void* siginfo) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
// If we are here because of an assert/guarantee, we suppress
// printing the siginfo, because it is only an implementation
// detail capturing the context for said assert/guarantee.
if (VMError::was_assert_poison_crash(siginfo)) {
return;
}
#endif
const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
st->print("siginfo:");
char tmp[64];
@ -2636,14 +2625,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
#endif
#endif
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (VMError::was_assert_poison_crash(exception_record)) {
if (handle_assert_poison_fault(exceptionInfo)) {
return EXCEPTION_CONTINUE_EXECUTION;
}
}
#endif
if (t != nullptr && t->is_Java_thread()) {
JavaThread* thread = JavaThread::cast(t);
bool in_java = thread->thread_state() == _thread_in_Java;
@ -6184,26 +6165,3 @@ void os::print_user_info(outputStream* st) {
void os::print_active_locale(outputStream* st) {
// not implemented yet
}
static CONTEXT _saved_assert_context;
static EXCEPTION_RECORD _saved_exception_record;
static bool _has_saved_context = false;
void os::save_assert_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
assert(!_has_saved_context, "invariant");
const EXCEPTION_POINTERS* ep = static_cast<const EXCEPTION_POINTERS*>(ucVoid);
memcpy(&_saved_assert_context, ep->ContextRecord, sizeof(CONTEXT));
memcpy(&_saved_exception_record, ep->ExceptionRecord, sizeof(EXCEPTION_RECORD));
_has_saved_context = true;
}
const void* os::get_saved_assert_context(const void** sigInfo) {
assert(sigInfo != nullptr, "invariant");
if (_has_saved_context) {
*sigInfo = &_saved_exception_record;
return &_saved_assert_context;
}
*sigInfo = nullptr;
return nullptr;
}

View File

@ -28,7 +28,6 @@
#include "runtime/arguments.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/vmError.hpp"
LONG WINAPI crash_handler(struct _EXCEPTION_POINTERS* exceptionInfo) {
@ -68,23 +67,10 @@ void VMError::check_failing_cds_access(outputStream* st, const void* siginfo) {
void VMError::reporting_started() {}
void VMError::interrupt_reporting_thread() {}
void VMError::raise_fail_fast(const void* exrecord, const void* context) {
void VMError::raise_fail_fast(void* exrecord, void* context) {
DWORD flags = (exrecord == nullptr) ? FAIL_FAST_GENERATE_EXCEPTION_ADDRESS : 0;
PEXCEPTION_RECORD exception_record = static_cast<PEXCEPTION_RECORD>(const_cast<void*>(exrecord));
PCONTEXT ctx = static_cast<PCONTEXT>(const_cast<void*>(context));
RaiseFailFastException(exception_record, ctx, flags);
RaiseFailFastException(static_cast<PEXCEPTION_RECORD>(exrecord),
static_cast<PCONTEXT>(context),
flags);
::abort();
}
bool VMError::was_assert_poison_crash(const void* siginfo) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (siginfo == nullptr) {
return false;
}
const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && er->NumberParameters >= 2) {
return (void*)er->ExceptionInformation[1] == g_assert_poison_read_only;
}
#endif
return false;
}

View File

@ -1294,15 +1294,10 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
if (source_oop != nullptr) {
// This is a regular oop that got archived.
// Don't print the requested addr again as we have just printed it at the beginning of the line.
// Example:
// 0x00000007ffd27938: @@ Object (0xfffa4f27) java.util.HashMap
print_oop_info_cr(&st, source_oop, /*print_requested_addr=*/false);
print_oop_with_requested_addr_cr(&st, source_oop, false);
byte_size = source_oop->size() * BytesPerWord;
} else if ((byte_size = ArchiveHeapWriter::get_filler_size_at(start)) > 0) {
// We have a filler oop, which also does not exist in BufferOffsetToSourceObjectTable.
// Example:
// 0x00000007ffc3ffd8: @@ Object filler 40 bytes
st.print_cr("filler " SIZE_FORMAT " bytes", byte_size);
} else {
ShouldNotReachHere();
@ -1320,7 +1315,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
// ArchivedFieldPrinter is used to print the fields of archived objects. We can't
// use _source_obj->print_on(), because we want to print the oop fields
// in _source_obj with their requested addresses using print_oop_info_cr().
// in _source_obj with their requested addresses using print_oop_with_requested_addr_cr().
class ArchivedFieldPrinter : public FieldClosure {
ArchiveHeapInfo* _heap_info;
outputStream* _st;
@ -1336,14 +1331,8 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
switch (ft) {
case T_ARRAY:
case T_OBJECT:
{
fd->print_on(_st); // print just the name and offset
oop obj = _source_obj->obj_field(fd->offset());
if (java_lang_Class::is_instance(obj)) {
obj = HeapShared::scratch_java_mirror(obj);
}
print_oop_info_cr(_st, obj);
}
fd->print_on(_st); // print just the name and offset
print_oop_with_requested_addr_cr(_st, _source_obj->obj_field(fd->offset()));
break;
default:
if (ArchiveHeapWriter::is_marked_as_native_pointer(_heap_info, _source_obj, fd->offset())) {
@ -1399,78 +1388,37 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
objArrayOop source_obj_array = objArrayOop(source_oop);
for (int i = 0; i < source_obj_array->length(); i++) {
st.print(" -%4d: ", i);
oop obj = source_obj_array->obj_at(i);
if (java_lang_Class::is_instance(obj)) {
obj = HeapShared::scratch_java_mirror(obj);
}
print_oop_info_cr(&st, obj);
print_oop_with_requested_addr_cr(&st, source_obj_array->obj_at(i));
}
} else {
st.print_cr(" - fields (" SIZE_FORMAT " words):", source_oop->size());
ArchivedFieldPrinter print_field(heap_info, &st, source_oop, buffered_addr);
InstanceKlass::cast(source_klass)->print_nonstatic_fields(&print_field);
if (java_lang_Class::is_instance(source_oop)) {
oop scratch_mirror = source_oop;
st.print(" - signature: ");
print_class_signature_for_mirror(&st, scratch_mirror);
st.cr();
Klass* src_klass = java_lang_Class::as_Klass(scratch_mirror);
if (src_klass != nullptr && src_klass->is_instance_klass()) {
oop rr = HeapShared::scratch_resolved_references(InstanceKlass::cast(src_klass)->constants());
st.print(" - archived_resolved_references: ");
print_oop_info_cr(&st, rr);
// We need to print the fields in the scratch_mirror, not the original mirror.
// (if a class is not aot-initialized, static fields in its scratch mirror will be cleared).
assert(scratch_mirror == HeapShared::scratch_java_mirror(src_klass->java_mirror()), "sanity");
st.print_cr("- ---- static fields (%d):", java_lang_Class::static_oop_field_count(scratch_mirror));
InstanceKlass::cast(src_klass)->do_local_static_fields(&print_field);
}
}
}
}
}
static void print_class_signature_for_mirror(outputStream* st, oop scratch_mirror) {
assert(java_lang_Class::is_instance(scratch_mirror), "sanity");
if (java_lang_Class::is_primitive(scratch_mirror)) {
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt) && scratch_mirror == HeapShared::scratch_java_mirror(bt)) {
oop orig_mirror = Universe::java_mirror(bt);
java_lang_Class::print_signature(orig_mirror, st);
return;
}
}
ShouldNotReachHere();
}
java_lang_Class::print_signature(scratch_mirror, st);
}
static void log_heap_roots() {
LogStreamHandle(Trace, cds, map, oops) st;
if (st.is_enabled()) {
for (int i = 0; i < HeapShared::pending_roots()->length(); i++) {
st.print("roots[%4d]: ", i);
print_oop_info_cr(&st, HeapShared::pending_roots()->at(i));
print_oop_with_requested_addr_cr(&st, HeapShared::pending_roots()->at(i));
}
}
}
// Example output:
// - The first number is the requested address (if print_requested_addr == true)
// - The second number is the narrowOop version of the requested address (if UseCompressedOops == true)
// 0x00000007ffc7e840 (0xfff8fd08) java.lang.Class Ljava/util/Array;
// The output looks like this. The first number is the requested address. The second number is
// the narrowOop version of the requested address.
// 0x00000007ffc7e840 (0xfff8fd08) java.lang.Class
// 0x00000007ffc000f8 (0xfff8001f) [B length: 11
static void print_oop_info_cr(outputStream* st, oop source_oop, bool print_requested_addr = true) {
static void print_oop_with_requested_addr_cr(outputStream* st, oop source_oop, bool print_addr = true) {
if (source_oop == nullptr) {
st->print_cr("null");
} else {
ResourceMark rm;
oop requested_obj = ArchiveHeapWriter::source_obj_to_requested_obj(source_oop);
if (print_requested_addr) {
if (print_addr) {
st->print(PTR_FORMAT " ", p2i(requested_obj));
}
if (UseCompressedOops) {
@ -1480,27 +1428,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
int array_len = arrayOop(source_oop)->length();
st->print_cr("%s length: %d", source_oop->klass()->external_name(), array_len);
} else {
st->print("%s", source_oop->klass()->external_name());
if (java_lang_String::is_instance(source_oop)) {
st->print(" ");
java_lang_String::print(source_oop, st);
} else if (java_lang_Class::is_instance(source_oop)) {
oop scratch_mirror = source_oop;
st->print(" ");
print_class_signature_for_mirror(st, scratch_mirror);
Klass* src_klass = java_lang_Class::as_Klass(scratch_mirror);
if (src_klass != nullptr && src_klass->is_instance_klass()) {
InstanceKlass* buffered_klass =
ArchiveBuilder::current()->get_buffered_addr(InstanceKlass::cast(src_klass));
if (buffered_klass->has_aot_initialized_mirror()) {
st->print(" (aot-inited)");
}
}
}
st->cr();
st->print_cr("%s", source_oop->klass()->external_name());
}
}
}

View File

@ -343,20 +343,10 @@ public:
template <typename T>
u4 any_to_offset_u4(T p) const {
assert(p != nullptr, "must not be null");
uintx offset = any_to_offset((address)p);
return to_offset_u4(offset);
}
template <typename T>
u4 any_or_null_to_offset_u4(T p) const {
if (p == nullptr) {
return 0;
} else {
return any_to_offset_u4<T>(p);
}
}
template <typename T>
T offset_to_buffered(u4 offset) const {
return (T)offset_to_buffered_address(offset);

View File

@ -399,3 +399,188 @@ size_t HeapRootSegments::segment_offset(size_t seg_idx) {
return _base_offset + seg_idx * _max_size_in_bytes;
}
ArchiveWorkers ArchiveWorkers::_workers;
ArchiveWorkers::ArchiveWorkers() :
_start_semaphore(0),
_end_semaphore(0),
_num_workers(0),
_started_workers(0),
_waiting_workers(0),
_running_workers(0),
_state(NOT_READY),
_task(nullptr) {
}
void ArchiveWorkers::initialize() {
assert(Atomic::load(&_state) == NOT_READY, "Should be");
Atomic::store(&_num_workers, max_workers());
Atomic::store(&_state, READY);
// Kick off pool startup by creating a single worker.
start_worker_if_needed();
}
int ArchiveWorkers::max_workers() {
// The pool is used for short-lived bursty tasks. We do not want to spend
// too much time creating and waking up threads unnecessarily. Plus, we do
// not want to overwhelm large machines. This is why we want to be very
// conservative about the number of workers actually needed.
return MAX2(0, log2i_graceful(os::active_processor_count()));
}
bool ArchiveWorkers::is_parallel() {
return _num_workers > 0;
}
void ArchiveWorkers::shutdown() {
while (true) {
State state = Atomic::load(&_state);
if (state == SHUTDOWN) {
// Already shut down.
return;
}
if (Atomic::cmpxchg(&_state, state, SHUTDOWN, memory_order_relaxed) == state) {
if (is_parallel()) {
// Execute a shutdown task and block until all workers respond.
run_task(&_shutdown_task);
}
}
}
}
void ArchiveWorkers::start_worker_if_needed() {
while (true) {
int cur = Atomic::load(&_started_workers);
if (cur >= _num_workers) {
return;
}
if (Atomic::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) {
new ArchiveWorkerThread(this);
return;
}
}
}
void ArchiveWorkers::signal_worker_if_needed() {
while (true) {
int cur = Atomic::load(&_waiting_workers);
if (cur == 0) {
return;
}
if (Atomic::cmpxchg(&_waiting_workers, cur, cur - 1, memory_order_relaxed) == cur) {
_start_semaphore.signal(1);
return;
}
}
}
void ArchiveWorkers::run_task(ArchiveWorkerTask* task) {
assert((Atomic::load(&_state) == READY) ||
((Atomic::load(&_state) == SHUTDOWN) && (task == &_shutdown_task)),
"Should be in correct state");
assert(Atomic::load(&_task) == nullptr, "Should not have running tasks");
if (is_parallel()) {
run_task_multi(task);
} else {
run_task_single(task);
}
}
void ArchiveWorkers::run_task_single(ArchiveWorkerTask* task) {
// Single thread needs no chunking.
task->configure_max_chunks(1);
// Execute the task ourselves, as there are no workers.
task->work(0, 1);
}
void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) {
// Multiple threads can work with multiple chunks.
task->configure_max_chunks(_num_workers * CHUNKS_PER_WORKER);
// Set up the run and publish the task.
Atomic::store(&_waiting_workers, _num_workers);
Atomic::store(&_running_workers, _num_workers);
Atomic::release_store(&_task, task);
// Kick off pool wakeup by signaling a single worker, and proceed
// immediately to executing the task locally.
signal_worker_if_needed();
// Execute the task ourselves, while workers are catching up.
// This allows us to hide parts of task handoff latency.
task->run();
// Done executing task locally, wait for any remaining workers to complete,
// and then do the final housekeeping.
_end_semaphore.wait();
Atomic::store(&_task, (ArchiveWorkerTask *) nullptr);
OrderAccess::fence();
assert(Atomic::load(&_waiting_workers) == 0, "All workers were signaled");
assert(Atomic::load(&_running_workers) == 0, "No workers are running");
}
void ArchiveWorkerTask::run() {
while (true) {
int chunk = Atomic::load(&_chunk);
if (chunk >= _max_chunks) {
return;
}
if (Atomic::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) {
assert(0 <= chunk && chunk < _max_chunks, "Sanity");
work(chunk, _max_chunks);
}
}
}
void ArchiveWorkerTask::configure_max_chunks(int max_chunks) {
if (_max_chunks == 0) {
_max_chunks = max_chunks;
}
}
bool ArchiveWorkers::run_as_worker() {
assert(is_parallel(), "Should be in parallel mode");
_start_semaphore.wait();
// Avalanche wakeups: each worker signals two others.
signal_worker_if_needed();
signal_worker_if_needed();
ArchiveWorkerTask* task = Atomic::load_acquire(&_task);
task->run();
// All work done in threads should be visible to caller.
OrderAccess::fence();
// Signal the pool the tasks are complete, if this is the last worker.
if (Atomic::sub(&_running_workers, 1, memory_order_relaxed) == 0) {
_end_semaphore.signal();
}
// Continue if task was not a termination task.
return (task != &_shutdown_task);
}
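
A standalone back-of-envelope for the avalanche pattern above, assuming each signaled worker wakes two more:

#include <cstdio>

// Each signaled worker signals two others, so all n workers are awake after
// roughly log2(n) rounds instead of n sequential signals from the caller.
int main() {
  int awake = 1;   // the single worker signaled by run_task_multi()
  int rounds = 0;
  const int n = 16;
  while (awake < n) { awake *= 2; rounds++; }
  std::printf("%d workers awake after %d rounds\n", awake, rounds); // 16 after 4
}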
ArchiveWorkerThread::ArchiveWorkerThread(ArchiveWorkers* pool) : NamedThread(), _pool(pool) {
set_name("ArchiveWorkerThread");
os::create_thread(this, os::os_thread);
os::start_thread(this);
}
void ArchiveWorkerThread::run() {
// Avalanche thread startup: each starting worker starts two others.
_pool->start_worker_if_needed();
_pool->start_worker_if_needed();
// Set ourselves up.
os::set_priority(this, NearMaxPriority);
while (_pool->run_as_worker()) {
// Work until terminated.
}
}
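
A standalone sketch of the chunk-claiming loop in ArchiveWorkerTask::run(), using std::atomic in place of HotSpot's Atomic; the names below are illustrative:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<int> next_chunk{0};
const int max_chunks = 8;

// Every participant, caller included, races to claim chunk indices until all
// are taken; fast threads simply claim more chunks, balancing the work.
void run_worker(int id) {
  while (true) {
    int c = next_chunk.load();
    if (c >= max_chunks) return;
    if (next_chunk.compare_exchange_weak(c, c + 1)) {
      std::printf("worker %d works on chunk %d of %d\n", id, c, max_chunks);
    }
  }
}

int main() {
  std::vector<std::thread> pool;
  for (int i = 0; i < 3; i++) pool.emplace_back(run_worker, i);
  run_worker(99);  // the caller participates, as in run_task_multi()
  for (auto& t : pool) t.join();
}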

View File

@ -33,6 +33,8 @@
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
class BootstrapInfo;
class ReservedSpace;
@ -162,7 +164,7 @@ public:
DumpRegion(const char* name, uintx max_delta = 0)
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
_max_delta(max_delta), _is_packed(false),
_rs(nullptr), _vs(nullptr) {}
_rs(NULL), _vs(NULL) {}
char* expand_top_to(char* newtop);
char* allocate(size_t num_bytes, size_t alignment = 0);
@ -259,33 +261,16 @@ public:
static bool has_aot_initialized_mirror(InstanceKlass* src_ik);
template <typename T> static Array<T>* archive_array(GrowableArray<T>* tmp_array);
// The following functions translate between a u4 offset and an address in
// the range of the mapped CDS archive (e.g., Metaspace::is_in_shared_metaspace()).
// Since the first 16 bytes in this range are dummy data (see ArchiveBuilder::reserve_buffer()),
// we know that offset 0 never represents a valid object. As a result, an offset of 0
// is used to encode a nullptr.
//
// Use the "archived_address_or_null" variants if a nullptr may be encoded.
// offset must represent an object of type T in the mapped shared space. Return
// a direct pointer to this object.
template <typename T> T static offset_to_archived_address(u4 offset) {
assert(offset != 0, "sanity");
template <typename T> T static from_offset(u4 offset) {
T p = (T)(SharedBaseAddress + offset);
assert(Metaspace::is_in_shared_metaspace(p), "must be");
return p;
}
template <typename T> T static offset_to_archived_address_or_null(u4 offset) {
if (offset == 0) {
return nullptr;
} else {
return offset_to_archived_address<T>(offset);
}
}
// p must be an archived object. Get its offset from SharedBaseAddress
template <typename T> static u4 archived_address_to_offset(T p) {
template <typename T> static u4 to_offset(T p) {
uintx pn = (uintx)p;
uintx base = (uintx)SharedBaseAddress;
assert(Metaspace::is_in_shared_metaspace(p), "must be");
@ -294,14 +279,6 @@ public:
assert(offset <= MAX_SHARED_DELTA, "range check");
return static_cast<u4>(offset);
}
template <typename T> static u4 archived_address_or_null_to_offset(T p) {
if (p == nullptr) {
return 0;
} else {
return archived_address_to_offset<T>(p);
}
}
};
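
A standalone sketch of the 0-encodes-nullptr convention documented above; the buffer and helper names are invented, the real code uses SharedBaseAddress:

#include <cassert>
#include <cstdint>

static char buffer[64];            // stand-in for the mapped archive range
static char* const base = buffer;  // stand-in for SharedBaseAddress

// Offset 0 can never name a real object because the first bytes at the base
// are dummy data, so 0 is free to encode nullptr.
uint32_t to_offset_or_null(const void* p) {
  return p == nullptr ? 0 : (uint32_t)((const char*)p - base);
}
void* from_offset_or_null(uint32_t off) {
  return off == 0 ? nullptr : (void*)(base + off);
}

int main() {
  void* obj = base + 16;           // first 16 bytes are dummy, per the comment
  assert(from_offset_or_null(to_offset_or_null(obj)) == obj);
  assert(from_offset_or_null(to_offset_or_null(nullptr)) == nullptr);
}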
class HeapRootSegments {
@ -344,4 +321,95 @@ public:
HeapRootSegments& operator=(const HeapRootSegments&) = default;
};
class ArchiveWorkers;
// A task to be worked on by worker threads
class ArchiveWorkerTask : public CHeapObj<mtInternal> {
friend class ArchiveWorkers;
friend class ArchiveWorkerShutdownTask;
private:
const char* _name;
int _max_chunks;
volatile int _chunk;
void run();
void configure_max_chunks(int max_chunks);
public:
ArchiveWorkerTask(const char* name) :
_name(name), _max_chunks(0), _chunk(0) {}
const char* name() const { return _name; }
virtual void work(int chunk, int max_chunks) = 0;
};
class ArchiveWorkerThread : public NamedThread {
friend class ArchiveWorkers;
private:
ArchiveWorkers* const _pool;
public:
ArchiveWorkerThread(ArchiveWorkers* pool);
const char* type_name() const override { return "Archive Worker Thread"; }
void run() override;
};
class ArchiveWorkerShutdownTask : public ArchiveWorkerTask {
public:
ArchiveWorkerShutdownTask() : ArchiveWorkerTask("Archive Worker Shutdown") {
// This task always has only one chunk.
configure_max_chunks(1);
}
void work(int chunk, int max_chunks) override {
// Do nothing.
}
};
// Special worker pool for archive workers. The goal for this pool is to
// start up fast, distribute spiky workloads efficiently, and shut down
// after use. This makes the implementation quite different from
// the normal GC worker pool.
class ArchiveWorkers {
friend class ArchiveWorkerThread;
private:
// Target number of chunks per worker. This should be large enough to even
// out work imbalance, and small enough to keep bookkeeping overheads low.
static constexpr int CHUNKS_PER_WORKER = 4;
static int max_workers();
// Global shared instance. Can be uninitialized, can be shut down.
static ArchiveWorkers _workers;
ArchiveWorkerShutdownTask _shutdown_task;
Semaphore _start_semaphore;
Semaphore _end_semaphore;
int _num_workers;
int _started_workers;
int _waiting_workers;
int _running_workers;
typedef enum { NOT_READY, READY, SHUTDOWN } State;
volatile State _state;
ArchiveWorkerTask* _task;
bool run_as_worker();
void start_worker_if_needed();
void signal_worker_if_needed();
void run_task_single(ArchiveWorkerTask* task);
void run_task_multi(ArchiveWorkerTask* task);
bool is_parallel();
ArchiveWorkers();
public:
static ArchiveWorkers* workers() { return &_workers; }
void initialize();
void shutdown();
void run_task(ArchiveWorkerTask* task);
};
#endif // SHARE_CDS_ARCHIVEUTILS_HPP
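
A hypothetical example against the API declared above; ZeroFillTask is invented for illustration and is not part of the patch:

#include <cstring>

class ZeroFillTask : public ArchiveWorkerTask {
  char* const _buf;
  const size_t _size;
public:
  ZeroFillTask(char* buf, size_t size)
    : ArchiveWorkerTask("Zero Fill"), _buf(buf), _size(size) {}
  void work(int chunk, int max_chunks) override {
    // Chunk k owns [size*k/max_chunks, size*(k+1)/max_chunks).
    size_t start = _size * (size_t)chunk / (size_t)max_chunks;
    size_t end   = _size * (size_t)(chunk + 1) / (size_t)max_chunks;
    memset(_buf + start, 0, end - start);
  }
};

// Typical lifecycle, mirroring the initialize()/run_task()/shutdown() calls
// that appear in the MetaspaceShared hunks further below:
//   ArchiveWorkers::workers()->initialize();
//   ZeroFillTask task(buf, len);
//   ArchiveWorkers::workers()->run_task(&task);  // caller participates too
//   ArchiveWorkers::workers()->shutdown();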

View File

@ -117,7 +117,10 @@
product(bool, AOTClassLinking, false, \
"Load/link all archived classes for the boot/platform/app " \
"loaders before application main") \
\
product(bool, AOTCacheParallelRelocation, true, DIAGNOSTIC, \
"Use parallel relocation code to speed up startup.") \
\
// end of CDS_FLAGS
DECLARE_FLAGS(CDS_FLAGS)
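
Since the new flag is declared DIAGNOSTIC, toggling it presumably requires unlocking diagnostic options first; an illustrative invocation (the archive file name is a placeholder):

java -XX:+UnlockDiagnosticVMOptions -XX:-AOTCacheParallelRelocation -XX:SharedArchiveFile=app.jsa ...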

View File

@ -53,18 +53,15 @@
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
@ -1975,6 +1972,32 @@ char* FileMapInfo::map_bitmap_region() {
return bitmap_base;
}
class SharedDataRelocationTask : public ArchiveWorkerTask {
private:
BitMapView* const _rw_bm;
BitMapView* const _ro_bm;
SharedDataRelocator* const _rw_reloc;
SharedDataRelocator* const _ro_reloc;
public:
SharedDataRelocationTask(BitMapView* rw_bm, BitMapView* ro_bm, SharedDataRelocator* rw_reloc, SharedDataRelocator* ro_reloc) :
ArchiveWorkerTask("Shared Data Relocation"),
_rw_bm(rw_bm), _ro_bm(ro_bm), _rw_reloc(rw_reloc), _ro_reloc(ro_reloc) {}
void work(int chunk, int max_chunks) override {
work_on(chunk, max_chunks, _rw_bm, _rw_reloc);
work_on(chunk, max_chunks, _ro_bm, _ro_reloc);
}
void work_on(int chunk, int max_chunks, BitMapView* bm, SharedDataRelocator* reloc) {
BitMap::idx_t size = bm->size();
BitMap::idx_t start = MIN2(size, size * chunk / max_chunks);
BitMap::idx_t end = MIN2(size, size * (chunk + 1) / max_chunks);
assert(end > start, "Sanity: no empty slices");
bm->iterate(reloc, start, end);
}
};
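
A standalone check of the slice arithmetic in work_on() above; the floor divisions tile the bitmap without gaps or overlap, and every slice is non-empty as long as size >= max_chunks:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t size = 10, max_chunks = 4;
  for (size_t chunk = 0; chunk < max_chunks; chunk++) {
    size_t start = std::min(size, size * chunk / max_chunks);
    size_t end   = std::min(size, size * (chunk + 1) / max_chunks);
    std::printf("[%zu,%zu) ", start, end);   // prints [0,2) [2,5) [5,7) [7,10)
  }
  std::printf("\n");
}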
// This is called when we cannot map the archive at the requested base address (usually 0x800000000).
// We relocate all pointers in the 2 core regions (ro, rw).
bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
@ -2013,8 +2036,14 @@ bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
valid_new_base, valid_new_end, addr_delta);
SharedDataRelocator ro_patcher((address*)ro_patch_base + header()->ro_ptrmap_start_pos(), (address*)ro_patch_end, valid_old_base, valid_old_end,
valid_new_base, valid_new_end, addr_delta);
rw_ptrmap.iterate(&rw_patcher);
ro_ptrmap.iterate(&ro_patcher);
if (AOTCacheParallelRelocation) {
SharedDataRelocationTask task(&rw_ptrmap, &ro_ptrmap, &rw_patcher, &ro_patcher);
ArchiveWorkers::workers()->run_task(&task);
} else {
rw_ptrmap.iterate(&rw_patcher);
ro_ptrmap.iterate(&ro_patcher);
}
// The MetaspaceShared::bm region will be unmapped in MetaspaceShared::initialize_shared_spaces().
@ -2681,44 +2710,11 @@ ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, Handle cl
const char* const file_name = ClassLoader::file_name_for_class_name(class_name,
name->utf8_length());
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
ClassFileStream* cfs;
if (class_loader() != nullptr && !cpe->is_modules_image()) {
cfs = get_stream_from_class_loader(class_loader, cpe, file_name, CHECK_NULL);
} else {
cfs = cpe->open_stream_for_loader(THREAD, file_name, loader_data);
}
ClassFileStream* cfs = cpe->open_stream_for_loader(THREAD, file_name, loader_data);
assert(cfs != nullptr, "must be able to read the classfile data of shared classes for built-in loaders.");
log_debug(cds, jvmti)("classfile data for %s [%d: %s] = %d bytes", class_name, path_index,
cfs->source(), cfs->length());
return cfs;
}
ClassFileStream* FileMapInfo::get_stream_from_class_loader(Handle class_loader,
ClassPathEntry* cpe,
const char* file_name,
TRAPS) {
JavaValue result(T_OBJECT);
TempNewSymbol class_name_sym = SymbolTable::new_symbol(file_name);
Handle ext_class_name = java_lang_String::externalize_classname(class_name_sym, CHECK_NULL);
// byte[] ClassLoader.getResourceAsByteArray(String name)
JavaCalls::call_virtual(&result,
class_loader,
vmClasses::ClassLoader_klass(),
vmSymbols::getResourceAsByteArray_name(),
vmSymbols::getResourceAsByteArray_signature(),
ext_class_name,
CHECK_NULL);
assert(result.get_type() == T_OBJECT, "just checking");
oop obj = result.get_oop();
assert(obj != nullptr, "ClassLoader.getResourceAsByteArray should not return null");
// copy from byte[] to a buffer
typeArrayOop ba = typeArrayOop(obj);
jint len = ba->length();
u1* buffer = NEW_RESOURCE_ARRAY(u1, len);
ArrayAccess<>::arraycopy_to_native<>(ba, typeArrayOopDesc::element_offset<jbyte>(0), buffer, len);
return new ClassFileStream(buffer, len, cpe->name());
}
#endif

View File

@ -507,10 +507,6 @@ public:
#if INCLUDE_JVMTI
// Caller needs a ResourceMark because parts of the returned cfs are resource-allocated.
static ClassFileStream* open_stream_for_jvmti(InstanceKlass* ik, Handle class_loader, TRAPS);
static ClassFileStream* get_stream_from_class_loader(Handle class_loader,
ClassPathEntry* cpe,
const char* file_name,
TRAPS);
#endif
static SharedClassPathEntry* shared_path(int index) {

View File

@ -254,7 +254,7 @@ void LambdaFormInvokers::read_static_archive_invokers() {
if (_static_archive_invokers != nullptr) {
for (int i = 0; i < _static_archive_invokers->length(); i++) {
u4 offset = _static_archive_invokers->at(i);
Array<char>* line = ArchiveUtils::offset_to_archived_address<Array<char>*>(offset);
Array<char>* line = ArchiveUtils::from_offset<Array<char>*>(offset);
char* str = line->adr_at(0);
append(str);
}

View File

@ -143,7 +143,7 @@ public:
u4 invoked_name = b->any_to_offset_u4(key.invoked_name());
u4 invoked_type = b->any_to_offset_u4(key.invoked_type());
u4 method_type = b->any_to_offset_u4(key.method_type());
u4 member_method = b->any_or_null_to_offset_u4(key.member_method()); // could be null
u4 member_method = b->any_to_offset_u4(key.member_method());
u4 instantiated_method_type = b->any_to_offset_u4(key.instantiated_method_type());
return RunTimeLambdaProxyClassKey(caller_ik, invoked_name, invoked_type, method_type,
@ -158,12 +158,12 @@ public:
Symbol* instantiated_method_type) {
// All parameters must be in shared space, or else you'd get an assert in
// ArchiveUtils::to_offset().
return RunTimeLambdaProxyClassKey(ArchiveUtils::archived_address_to_offset(caller_ik),
ArchiveUtils::archived_address_to_offset(invoked_name),
ArchiveUtils::archived_address_to_offset(invoked_type),
ArchiveUtils::archived_address_to_offset(method_type),
ArchiveUtils::archived_address_or_null_to_offset(member_method), // could be null
ArchiveUtils::archived_address_to_offset(instantiated_method_type));
return RunTimeLambdaProxyClassKey(ArchiveUtils::to_offset(caller_ik),
ArchiveUtils::to_offset(invoked_name),
ArchiveUtils::to_offset(invoked_type),
ArchiveUtils::to_offset(method_type),
ArchiveUtils::to_offset(member_method),
ArchiveUtils::to_offset(instantiated_method_type));
}
unsigned int hash() const;

View File

@ -315,7 +315,7 @@ static GrowableArrayCHeap<OopHandle, mtClassShared>* _extra_interned_strings = n
// Extra Symbols to be added to the archive
static GrowableArrayCHeap<Symbol*, mtClassShared>* _extra_symbols = nullptr;
// Methods managed by SystemDictionary::find_method_handle_intrinsic() to be added to the archive
static GrowableArray<Method*>* _pending_method_handle_intrinsics = nullptr;
static GrowableArray<Method*>* _pending_method_handle_intrinsics = NULL;
void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename) {
_extra_interned_strings = new GrowableArrayCHeap<OopHandle, mtClassShared>(10000);
@ -1088,6 +1088,9 @@ void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
assert(CDSConfig::is_using_archive(), "Must be called when UseSharedSpaces is enabled");
MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
// We are about to open the archives. Initialize workers now.
ArchiveWorkers::workers()->initialize();
FileMapInfo* static_mapinfo = open_static_archive();
FileMapInfo* dynamic_mapinfo = nullptr;
@ -1679,6 +1682,9 @@ void MetaspaceShared::initialize_shared_spaces() {
dynamic_mapinfo->unmap_region(MetaspaceShared::bm);
}
// Archive was fully read. Workers are no longer needed.
ArchiveWorkers::workers()->shutdown();
LogStreamHandle(Info, cds) lsh;
if (lsh.is_enabled()) {
lsh.print("Using AOT-linked classes: %s (static archive: %s aot-linked classes",

View File

@ -79,7 +79,7 @@ InstanceKlass* RunTimeClassInfo::klass() const {
if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space((address)this)) {
return ArchiveBuilder::current()->offset_to_buffered<InstanceKlass*>(_klass_offset);
} else {
return ArchiveUtils::offset_to_archived_address<InstanceKlass*>(_klass_offset);
return ArchiveUtils::from_offset<InstanceKlass*>(_klass_offset);
}
}

View File

@ -52,15 +52,15 @@ public:
struct RTVerifierConstraint {
u4 _name;
u4 _from_name;
Symbol* name() { return ArchiveUtils::offset_to_archived_address<Symbol*>(_name); }
Symbol* from_name() { return ArchiveUtils::offset_to_archived_address<Symbol*>(_from_name); }
Symbol* name() { return ArchiveUtils::from_offset<Symbol*>(_name); }
Symbol* from_name() { return ArchiveUtils::from_offset<Symbol*>(_from_name); }
};
struct RTLoaderConstraint {
u4 _name;
char _loader_type1;
char _loader_type2;
Symbol* constraint_name() { return ArchiveUtils::offset_to_archived_address<Symbol*>(_name); }
Symbol* constraint_name() { return ArchiveUtils::from_offset<Symbol*>(_name); }
};
struct RTEnumKlassStaticFields {
int _num;
@ -177,7 +177,11 @@ public:
InstanceKlass* nest_host() {
assert(!ArchiveBuilder::is_active(), "not called when dumping archive");
return ArchiveUtils::offset_to_archived_address_or_null<InstanceKlass*>(_nest_host_offset);
if (_nest_host_offset == 0) {
return nullptr;
} else {
return ArchiveUtils::from_offset<InstanceKlass*>(_nest_host_offset);
}
}
RTLoaderConstraint* loader_constraints() {

View File

@ -34,10 +34,6 @@
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
// None of the error routines below take in a free-form, potentially unbounded
// string, and names are all limited to < 64K, so we know that all formatted
// strings passed to fthrow will not be excessively large.
void ClassFileParser::classfile_parse_error(const char* msg, TRAPS) const {
assert(_class_name != nullptr, "invariant");
ResourceMark rm(THREAD);

View File

@ -1794,7 +1794,6 @@ void ClassFileParser::throwIllegalSignature(const char* type,
assert(sig != nullptr, "invariant");
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
"%s \"%s\" in class %s has illegal signature \"%s\"", type,
@ -4074,8 +4073,6 @@ void ClassFileParser::check_super_class_access(const InstanceKlass* this_klass,
char* msg = Reflection::verify_class_access_msg(this_klass,
InstanceKlass::cast(super),
vca_result);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
if (msg == nullptr) {
bool same_module = (this_klass->module() == super->module());
Exceptions::fthrow(
@ -4124,8 +4121,6 @@ void ClassFileParser::check_super_interface_access(const InstanceKlass* this_kla
char* msg = Reflection::verify_class_access_msg(this_klass,
k,
vca_result);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
if (msg == nullptr) {
bool same_module = (this_klass->module() == k->module());
Exceptions::fthrow(
@ -4222,8 +4217,6 @@ static void check_illegal_static_method(const InstanceKlass* this_klass, TRAPS)
// if m is static and not the init method, throw a verify error
if ((m->is_static()) && (m->name() != vmSymbols::class_initializer_name())) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_VerifyError(),
@ -4243,7 +4236,6 @@ void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const {
assert(_major_version >= JAVA_9_VERSION || !is_module, "JVM_ACC_MODULE should not be set");
if (is_module) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_NoClassDefFoundError(),
@ -4267,7 +4259,6 @@ void ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const {
(is_interface && major_gte_1_5 && (is_super || is_enum)) ||
(!is_interface && major_gte_1_5 && is_annotation)) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -4304,7 +4295,6 @@ void ClassFileParser::verify_class_version(u2 major, u2 minor, Symbol* class_nam
}
if (major > max_version) {
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
@ -4320,7 +4310,6 @@ void ClassFileParser::verify_class_version(u2 major, u2 minor, Symbol* class_nam
if (minor == JAVA_PREVIEW_MINOR_VERSION) {
if (major != max_version) {
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
@ -4373,7 +4362,6 @@ void ClassFileParser::verify_legal_field_modifiers(jint flags,
if (is_illegal) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -4457,7 +4445,6 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags,
if (is_illegal) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -4699,7 +4686,6 @@ void ClassFileParser::verify_legal_class_name(const Symbol* name, TRAPS) const {
if (!legal) {
ResourceMark rm(THREAD);
assert(_class_name != nullptr, "invariant");
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -4733,7 +4719,6 @@ void ClassFileParser::verify_legal_field_name(const Symbol* name, TRAPS) const {
if (!legal) {
ResourceMark rm(THREAD);
assert(_class_name != nullptr, "invariant");
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -4771,7 +4756,6 @@ void ClassFileParser::verify_legal_method_name(const Symbol* name, TRAPS) const
if (!legal) {
ResourceMark rm(THREAD);
assert(_class_name != nullptr, "invariant");
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_ClassFormatError(),
@ -5543,7 +5527,6 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
if (_class_name != class_name_in_cp) {
if (_class_name != vmSymbols::unknown_class_name()) {
ResourceMark rm(THREAD);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_NoClassDefFoundError(),
"%s (wrong name: %s)",

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -46,6 +46,11 @@ public:
static const char* impact() {
return "Medium: Depends on number of class loaders and classes loaded.";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", nullptr};
return p;
}
static int num_arguments() { return 3; }
virtual void execute(DCmdSource source, TRAPS);

View File

@ -58,6 +58,12 @@ public:
static int num_arguments() {
return 0;
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", nullptr};
return p;
}
};

View File

@ -44,7 +44,7 @@ const size_t REHASH_LEN = 100;
Dictionary::Dictionary(ClassLoaderData* loader_data, size_t table_size)
: _number_of_entries(0), _loader_data(loader_data) {
size_t start_size_log_2 = MAX2(log2i_ceil(table_size), 2); // 2 is minimum size even though some dictionaries only have one entry
size_t start_size_log_2 = MAX2(ceil_log2(table_size), 2); // 2 is minimum size even though some dictionaries only have one entry
size_t current_size = ((size_t)1) << start_size_log_2;
log_info(class, loader, data)("Dictionary start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
current_size, start_size_log_2);
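
A standalone check of the sizing math above (ceil_log2 here is a local stand-in for the renamed log2i_ceil/ceil_log2 pair): a requested table_size of 100 starts the table at 2^7 = 128 buckets:

#include <algorithm>
#include <cassert>
#include <cstddef>

static size_t ceil_log2(size_t n) {   // local stand-in: smallest r with 2^r >= n
  size_t r = 0;
  while (((size_t)1 << r) < n) r++;
  return r;
}

int main() {
  size_t start_size_log_2 = std::max(ceil_log2(100), (size_t)2); // floor of 2^2
  assert(start_size_log_2 == 7);
  assert((((size_t)1) << start_size_log_2) == 128);
}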

View File

@ -309,7 +309,7 @@ public:
};
void StringTable::create_table() {
size_t start_size_log_2 = log2i_ceil(StringTableSize);
size_t start_size_log_2 = ceil_log2(StringTableSize);
_current_size = ((size_t)1) << start_size_log_2;
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);

View File

@ -212,7 +212,7 @@ private:
};
void SymbolTable::create_table () {
size_t start_size_log_2 = log2i_ceil(SymbolTableSize);
size_t start_size_log_2 = ceil_log2(SymbolTableSize);
_current_size = ((size_t)1) << start_size_log_2;
log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);

View File

@ -105,13 +105,11 @@ static verify_byte_codes_fn_t verify_byte_codes_fn() {
// Methods in Verifier
// This method determines whether we run the verifier and class file format checking code.
bool Verifier::should_verify_for(oop class_loader) {
return class_loader == nullptr ?
BytecodeVerificationLocal : BytecodeVerificationRemote;
}
// This method determines whether we allow package access in access checks in reflection.
bool Verifier::relax_access_for(oop loader) {
bool trusted = java_lang_ClassLoader::is_trusted_loader(loader);
bool need_verify =
@ -122,21 +120,6 @@ bool Verifier::relax_access_for(oop loader) {
return !need_verify;
}
// Callers will pass should_verify_class as true, depending on the results of should_verify_for() above,
// or pass true for redefinition of any class.
static bool is_eligible_for_verification(InstanceKlass* klass, bool should_verify_class) {
Symbol* name = klass->name();
return (should_verify_class &&
// Can not verify the bytecodes for shared classes because they have
// already been rewritten to contain constant pool cache indices,
// which the verifier can't understand.
// Shared classes shouldn't have stackmaps either.
// However, bytecodes for shared old classes can be verified because
// they have not been rewritten.
!(klass->is_shared() && klass->is_rewritten()));
}
void Verifier::trace_class_resolution(Klass* resolve_class, InstanceKlass* verify_class) {
assert(verify_class != nullptr, "Unexpected null verify_class");
ResourceMark rm;
@ -290,6 +273,27 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
}
}
bool Verifier::is_eligible_for_verification(InstanceKlass* klass, bool should_verify_class) {
Symbol* name = klass->name();
return (should_verify_class &&
// return if the class is a bootstrapping class
// or defineClass specified not to verify by default (flags override passed arg)
// We need to skip the following four for bootstrapping
name != vmSymbols::java_lang_Object() &&
name != vmSymbols::java_lang_Class() &&
name != vmSymbols::java_lang_String() &&
name != vmSymbols::java_lang_Throwable() &&
// Can not verify the bytecodes for shared classes because they have
// already been rewritten to contain constant pool cache indices,
// which the verifier can't understand.
// Shared classes shouldn't have stackmaps either.
// However, bytecodes for shared old classes can be verified because
// they have not been rewritten.
!(klass->is_shared() && klass->is_rewritten()));
}
Symbol* Verifier::inference_verify(
InstanceKlass* klass, char* message, size_t message_len, TRAPS) {
JavaThread* thread = THREAD;

View File

@ -61,6 +61,7 @@ class Verifier : AllStatic {
static void trace_class_resolution(Klass* resolve_class, InstanceKlass* verify_class);
private:
static bool is_eligible_for_verification(InstanceKlass* klass, bool should_verify_class);
static Symbol* inference_verify(
InstanceKlass* klass, char* msg, size_t msg_len, TRAPS);
};

View File

@ -723,8 +723,6 @@ class SerializeClosure;
template(dumpSharedArchive_signature, "(ZLjava/lang/String;)Ljava/lang/String;") \
template(generateLambdaFormHolderClasses, "generateLambdaFormHolderClasses") \
template(generateLambdaFormHolderClasses_signature, "([Ljava/lang/String;)[Ljava/lang/Object;") \
template(getResourceAsByteArray_name, "getResourceAsByteArray") \
template(getResourceAsByteArray_signature, "(Ljava/lang/String;)[B") \
template(java_lang_Enum, "java/lang/Enum") \
template(java_lang_invoke_Invokers_Holder, "java/lang/invoke/Invokers$Holder") \
template(java_lang_invoke_DirectMethodHandle_Holder, "java/lang/invoke/DirectMethodHandle$Holder") \

View File

@ -219,22 +219,21 @@ bool MethodMatcher::match(Symbol* candidate, Symbol* match, Mode match_mode) con
static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
int match = MethodMatcher::Exact;
size_t len = strlen(name);
if (name[0] == '*') {
if (len == 1) {
if (strlen(name) == 1) {
return MethodMatcher::Any;
}
match |= MethodMatcher::Suffix;
memmove(name, name + 1, len); // Include terminating nul in move.
len--;
memmove(name, name + 1, strlen(name + 1) + 1);
}
size_t len = strlen(name);
if (len > 0 && name[len - 1] == '*') {
match |= MethodMatcher::Prefix;
name[--len] = '\0';
}
if (len == 0) {
if (strlen(name) == 0) {
error_msg = "** Not a valid pattern";
return MethodMatcher::Any;
}
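
A standalone sketch of the wildcard handling in check_mode() above (mirroring the variant that keeps len in sync with the memmove): a leading '*' selects suffix matching, a trailing '*' prefix matching, both together substring matching:

#include <cstdio>
#include <cstring>

int main() {
  char name[] = "*indexOf*";
  bool suffix = false, prefix = false;
  size_t len = strlen(name);
  if (name[0] == '*') {
    suffix = true;
    memmove(name, name + 1, len);  // include terminating nul in the move
    len--;
  }
  if (len > 0 && name[len - 1] == '*') {
    prefix = true;
    name[--len] = '\0';
  }
  // prints: pattern=indexOf suffix=1 prefix=1
  std::printf("pattern=%s suffix=%d prefix=%d\n", name, (int)suffix, (int)prefix);
}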

View File

@ -244,7 +244,10 @@ void G1Arguments::initialize() {
if (max_parallel_refinement_threads > UINT_MAX / divisor) {
vm_exit_during_initialization("Too large parallelism for remembered sets.");
}
}
void G1Arguments::initialize_heap_flags_and_sizes() {
GCArguments::initialize_heap_flags_and_sizes();
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}

View File

@ -39,6 +39,7 @@ class G1Arguments : public GCArguments {
static void parse_verification_type(const char* type);
virtual void initialize_alignments();
virtual void initialize_heap_flags_and_sizes();
virtual void initialize();
virtual size_t conservative_max_heap_alignment();

View File

@ -83,8 +83,6 @@ void ParallelArguments::initialize() {
if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
}
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
// The alignment used for boundary between young gen and old gen
@ -130,6 +128,7 @@ void ParallelArguments::initialize_heap_flags_and_sizes() {
// Redo everything from the start
initialize_heap_flags_and_sizes_one_pass();
}
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
size_t ParallelArguments::heap_reserved_size_bytes() {

View File

@ -362,9 +362,8 @@ HeapWord* ParallelCompactData::summarize_split_space(size_t src_region,
split_info.record(split_region, overflowing_obj, preceding_live_words);
// The [overflowing_obj, src_region_start) part has been accounted for, so
// we must move back the new_top, now that this overflowing obj is deferred.
HeapWord* new_top = destination - pointer_delta(region_to_addr(src_region), overflowing_obj);
HeapWord* src_region_start = region_to_addr(src_region);
HeapWord* new_top = destination - pointer_delta(src_region_start, overflowing_obj);
// If the overflowing obj was relocated to its original destination,
// those destination regions would have their source_region set. Now that
@ -891,7 +890,7 @@ void PSParallelCompact::summary_phase()
_summary_data.summarize_dense_prefix(old_space->bottom(), dense_prefix_end);
}
// Compacting objs in [dense_prefix_end, old_space->top())
// Compacting objs inn [dense_prefix_end, old_space->top())
_summary_data.summarize(_space_info[id].split_info(),
dense_prefix_end, old_space->top(), nullptr,
dense_prefix_end, old_space->end(),
@ -1597,9 +1596,9 @@ void PSParallelCompact::forward_to_new_addr() {
&start_region, &end_region);
for (size_t cur_region = start_region; cur_region < end_region; ++cur_region) {
RegionData* region_ptr = _summary_data.region(cur_region);
size_t partial_obj_size = region_ptr->partial_obj_size();
size_t live_words = region_ptr->partial_obj_size();
if (partial_obj_size == ParallelCompactData::RegionSize) {
if (live_words == ParallelCompactData::RegionSize) {
// No obj-start
continue;
}
@ -1607,18 +1606,19 @@ void PSParallelCompact::forward_to_new_addr() {
HeapWord* region_start = _summary_data.region_to_addr(cur_region);
HeapWord* region_end = region_start + ParallelCompactData::RegionSize;
if (split_info.is_split(cur_region)) {
// Part 1: will be relocated to space-1
HeapWord* preceding_destination = split_info.preceding_destination();
HeapWord* split_point = split_info.split_point();
forward_objs_in_range(cm, region_start + partial_obj_size, split_point, preceding_destination + partial_obj_size);
forward_objs_in_range(cm, region_start + live_words, split_point, preceding_destination + live_words);
// Part 2: will be relocated to space-2
HeapWord* destination = region_ptr->destination();
forward_objs_in_range(cm, split_point, region_end, destination);
} else {
HeapWord* destination = region_ptr->destination();
forward_objs_in_range(cm, region_start + partial_obj_size, region_end, destination + partial_obj_size);
forward_objs_in_range(cm, region_start + live_words, region_end, destination + live_words);
}
}
}
@ -1984,11 +1984,11 @@ HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_
}
}
// On starting to fill a destination region (dest-region), we need to know the
// location of the word that will be at the start of the dest-region after
// compaction. A dest-region can have one or more source regions, but only the
// first source-region contains this location. This location is retrieved by
// calling `first_src_addr` on a dest-region.
// On filling a destination region (dest-region), we need to know the location
// of the word that will be at the start of the dest-region after compaction.
// A dest-region can have one or more source regions, but only the first
// source-region contains this location. This location is retrieved by calling
// `first_src_addr` on a dest-region.
// Conversely, a source-region has a dest-region which holds the destination of
// the first live word on this source-region, based on which the destination
// for the rest of live words can be derived.
@ -2017,9 +2017,9 @@ HeapWord* PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_
// ^ ^
// | old-space-end | eden-space-start
//
// Therefore, in this example, region-n will have two dest-regions:
// 1. the final region in old-space
// 2. the first region in eden-space.
// Therefore, in this example, region-n will have two dest-regions, one for
// the final region in old-space and the other for the first region in
// eden-space.
// To handle this special case, we introduce the concept of split-region, whose
// contents are relocated to two spaces. `SplitInfo` captures all necessary
// info about the split: the first part, the splitting point, and the second part.

View File

@ -24,12 +24,12 @@
#include "precompiled.hpp"
#include "gc/shared/fullGCForwarding.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/serial/serialArguments.hpp"
#include "gc/serial/serialHeap.hpp"
void SerialArguments::initialize() {
GCArguments::initialize();
void SerialArguments::initialize_heap_flags_and_sizes() {
GenArguments::initialize_heap_flags_and_sizes();
FullGCForwarding::initialize_flags(MaxHeapSize);
}

View File

@ -31,8 +31,8 @@ class CollectedHeap;
class SerialArguments : public GenArguments {
private:
virtual void initialize();
virtual CollectedHeap* create_heap();
virtual void initialize_heap_flags_and_sizes();
};
#endif // SHARE_GC_SERIAL_SERIALARGUMENTS_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ bool BlockLocationPrinter<CollectedHeapT>::print_location(outputStream* st, void
// Check if addr points into Java heap.
bool in_heap = CollectedHeapT::heap()->is_in(addr);
if (in_heap) {
// base_oop_or_null() might be unimplemented and return null for some GCs/generations
// base_oop_or_null() might be unimplemented and return NULL for some GCs/generations
oop o = base_oop_or_null(addr);
if (o != nullptr) {
if ((void*)o == addr) {

View File

@ -176,8 +176,6 @@ void ShenandoahArguments::initialize() {
if (FLAG_IS_DEFAULT(TLABAllocationWeight)) {
FLAG_SET_DEFAULT(TLABAllocationWeight, 90);
}
FullGCForwarding::initialize_flags(MaxHeapSize);
}
size_t ShenandoahArguments::conservative_max_heap_alignment() {
@ -201,6 +199,11 @@ void ShenandoahArguments::initialize_alignments() {
HeapAlignment = align;
}
void ShenandoahArguments::initialize_heap_flags_and_sizes() {
GCArguments::initialize_heap_flags_and_sizes();
FullGCForwarding::initialize_flags(MaxHeapSize);
}
CollectedHeap* ShenandoahArguments::create_heap() {
return new ShenandoahHeap(new ShenandoahCollectorPolicy());
}

View File

@ -35,6 +35,7 @@ private:
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual void initialize_heap_flags_and_sizes();
virtual CollectedHeap* create_heap();
};

View File

@ -185,33 +185,33 @@ void ShenandoahPhaseTimings::flush_par_workers_to_cycle() {
for (uint pi = 0; pi < _num_phases; pi++) {
Phase phase = Phase(pi);
if (is_worker_phase(phase)) {
double sum = uninitialized();
double s = uninitialized();
for (uint i = 1; i < _num_par_phases; i++) {
ShenandoahWorkerData* wd = worker_data(phase, ParPhase(i));
double worker_sum = uninitialized();
double ws = uninitialized();
for (uint c = 0; c < _max_workers; c++) {
double worker_time = wd->get(c);
if (worker_time != ShenandoahWorkerData::uninitialized()) {
if (worker_sum == uninitialized()) {
worker_sum = worker_time;
double v = wd->get(c);
if (v != ShenandoahWorkerData::uninitialized()) {
if (ws == uninitialized()) {
ws = v;
} else {
worker_sum += worker_time;
ws += v;
}
}
}
if (worker_sum != uninitialized()) {
if (ws != uninitialized()) {
// add to each line in phase
set_cycle_data(Phase(phase + i + 1), worker_sum);
if (sum == uninitialized()) {
sum = worker_sum;
set_cycle_data(Phase(phase + i + 1), ws);
if (s == uninitialized()) {
s = ws;
} else {
sum += worker_sum;
s += ws;
}
}
}
if (sum != uninitialized()) {
if (s != uninitialized()) {
// add to total for phase
set_cycle_data(Phase(phase + 1), sum);
set_cycle_data(Phase(phase + 1), s);
}
}
}

View File

@ -1147,6 +1147,13 @@ JVM_GetClassFileVersion(JNIEnv *env, jclass current);
JNIEXPORT jboolean JNICALL
JVM_PrintWarningAtDynamicAgentLoad(void);
#define JNI_ONLOAD_SYMBOLS {"JNI_OnLoad"}
#define JNI_ONUNLOAD_SYMBOLS {"JNI_OnUnload"}
#define JVM_ONLOAD_SYMBOLS {"JVM_OnLoad"}
#define AGENT_ONLOAD_SYMBOLS {"Agent_OnLoad"}
#define AGENT_ONUNLOAD_SYMBOLS {"Agent_OnUnload"}
#define AGENT_ONATTACH_SYMBOLS {"Agent_OnAttach"}
/*
* This structure is used by the launcher to get the default thread
* stack size from the VM using JNI_GetDefaultJavaVMInitArgs() with a

View File

@ -323,9 +323,6 @@ void LinkResolver::check_klass_accessibility(Klass* ref_klass, Klass* sel_klass,
char* msg = Reflection::verify_class_access_msg(ref_klass,
InstanceKlass::cast(base_klass),
vca_result);
// Names are all known to be < 64k so we know this formatted message is not excessively large.
bool same_module = (base_klass->module() == ref_klass->module());
if (msg == nullptr) {
Exceptions::fthrow(
@ -618,7 +615,6 @@ void LinkResolver::check_method_accessability(Klass* ref_klass,
print_nest_host_error_on(&ss, ref_klass, sel_klass);
}
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_IllegalAccessError(),
"%s",
@ -972,7 +968,6 @@ void LinkResolver::check_field_accessability(Klass* ref_klass,
if (fd.is_private()) {
print_nest_host_error_on(&ss, ref_klass, sel_klass);
}
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(THREAD_AND_LOCATION,
vmSymbols::java_lang_IllegalAccessError(),
"%s",
@ -1192,7 +1187,6 @@ Method* LinkResolver::linktime_resolve_special_method(const LinkInfo& link_info,
ss.print(" %s(", resolved_method->name()->as_C_string());
resolved_method->signature()->print_as_signature_external_parameters(&ss);
ss.print(")' not found");
// Names are all known to be < 64k so we know this formatted message is not excessively large.
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_NoSuchMethodError(),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,6 +59,10 @@ class JfrStartFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Medium: Depending on the settings for a recording, the impact can range from low to high.";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdStart";
}
@ -80,6 +84,10 @@ class JfrDumpFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdDump";
}
@ -101,6 +109,10 @@ class JfrCheckFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdCheck";
}
@ -122,6 +134,10 @@ class JfrStopFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdStop";
}
@ -143,6 +159,10 @@ class JfrViewFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Medium";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdView";
}
@ -164,6 +184,10 @@ class JfrQueryFlightRecordingDCmd : public JfrDCmd {
static const char* impact() {
return "Medium";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
return "jdk/jfr/internal/dcmd/DCmdQuery";
}
@ -201,6 +225,10 @@ class JfrConfigureFlightRecorderDCmd : public DCmdWithParser {
static const char* impact() {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
static int num_arguments() { return 10; }
virtual void execute(DCmdSource source, TRAPS);
virtual void print_help(const char* name) const;

View File

@ -258,25 +258,12 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool
// quick reject, will not fit
return;
}
ObjectSample* popped = _priority_queue->pop();
size_t popped_span = popped->span();
ObjectSample* previous = popped->prev();
sample = _list->reuse(popped);
assert(sample != nullptr, "invariant");
if (previous != nullptr) {
push_span(previous, popped_span);
sample->set_span(span);
} else {
// The removed sample was the youngest sample in the list, which means the new sample is now the youngest
// sample. It should cover the spans of both.
sample->set_span(span + popped_span);
}
sample = _list->reuse(_priority_queue->pop());
} else {
sample = _list->get();
assert(sample != nullptr, "invariant");
sample->set_span(span);
}
assert(sample != nullptr, "invariant");
signal_unresolved_entry();
sample->set_thread_id(thread_id);
if (virtual_thread) {
@ -291,6 +278,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool
sample->set_stack_trace_hash(stacktrace_hash);
}
sample->set_span(allocated);
sample->set_object(cast_to_oop(obj));
sample->set_allocated(allocated);
sample->set_allocation_time(JfrTicks::now());
@ -317,18 +305,14 @@ void ObjectSampler::remove_dead(ObjectSample* sample) {
ObjectSample* const previous = sample->prev();
// push span onto previous
if (previous != nullptr) {
push_span(previous, sample->span());
_priority_queue->remove(previous);
previous->add_span(sample->span());
_priority_queue->push(previous);
}
_priority_queue->remove(sample);
_list->release(sample);
}
void ObjectSampler::push_span(ObjectSample* sample, size_t span) {
_priority_queue->remove(sample);
sample->add_span(span);
_priority_queue->push(sample);
}
ObjectSample* ObjectSampler::last() const {
return _list->last();
}
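
A standalone sketch of the span bookkeeping reshuffled above: when a sample is taken out, its span is folded into its neighbor so the total span over the list is conserved (the container and values are illustrative):

#include <cassert>
#include <list>

int main() {
  std::list<int> spans = {40, 25, 35};   // three live samples
  auto dead = std::next(spans.begin());  // drop the middle one
  auto prev = std::prev(dead);
  *prev += *dead;                        // "push span onto previous"
  spans.erase(dead);
  assert(spans.front() == 65 && spans.back() == 35);  // total is still 100
}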

View File

@ -64,7 +64,6 @@ class ObjectSampler : public CHeapObj<mtTracing> {
void add(HeapWord* object, size_t size, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread);
void scavenge();
void remove_dead(ObjectSample* sample);
void push_span(ObjectSample* sample, size_t span);
const ObjectSample* item_at(int index) const;
ObjectSample* item_at(int index);

View File

@ -58,6 +58,11 @@ class LogDiagnosticCommand : public DCmdWithParser {
static const char* description() {
return "Lists current log configuration, enables/disables/configures a log output, or rotates all logs.";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "control", nullptr};
return p;
}
};
#endif // SHARE_LOGGING_LOGDIAGNOSTICCOMMAND_HPP

View File

@ -353,7 +353,7 @@ class MetaspaceObj {
void* operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size,
Type type) throw();
void operator delete(void* p) = delete;
void operator delete(void* p) { ShouldNotCallThis(); }
// Declare a *static* method with the same signature in any subclass of MetaspaceObj
// that should be read-only by default. See symbol.hpp for an example. This function
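
The pair above contrasts a compile-time ban with a runtime guard; a minimal illustration of the difference, with NoDelete invented for the example:

struct NoDelete {
  void operator delete(void*) = delete;  // '= delete': misuse fails to compile
};
// With '= delete', 'delete p;' on a NoDelete* is rejected by the compiler,
// whereas a ShouldNotCallThis() body would only assert when actually executed.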

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -52,6 +52,11 @@ public:
static const char* impact() {
return "Medium: Depends on number of classes loaded.";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission",
"monitor", nullptr};
return p;
}
static int num_arguments() { return 8; }
virtual void execute(DCmdSource source, TRAPS);
};

View File

@ -62,7 +62,6 @@
#include "oops/instanceMirrorKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objLayout.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
@ -869,8 +868,6 @@ jint universe_init() {
// Initialize CPUTimeCounters object, which must be done before creation of the heap.
CPUTimeCounters::initialize();
ObjLayout::initialize();
#ifdef _LP64
MetaspaceShared::adjust_heap_sizes_for_dumping();
#endif // _LP64

View File

@ -179,11 +179,15 @@ const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTrack
};
void MemoryFileTracker::summary_snapshot(VirtualMemorySnapshot* snapshot) const {
iterate_summary([&](MemTag tag, const VirtualMemory* current) {
VirtualMemory* snap = snapshot->by_type(tag);
// Only account the committed memory.
snap->commit_memory(current->committed());
});
for (int d = 0; d < _files.length(); d++) {
const MemoryFile* file = _files.at(d);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_tag(i));
const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_tag(i));
// Only account the committed memory.
snap->commit_memory(current->committed());
}
}
}
void MemoryFileTracker::Instance::summary_snapshot(VirtualMemorySnapshot* snapshot) {

View File

@@ -39,8 +39,6 @@
 // The MemoryFileTracker tracks memory of 'memory files',
 // storage with its own memory space separate from the process.
 // A typical example of such a file is a memory mapped file.
-// All memory is accounted as committed, there is no reserved memory.
-// Any reserved memory is expected to exist in the VirtualMemoryTracker.
 class MemoryFileTracker {
   friend class NMTMemoryFileTrackerTest;
@@ -74,16 +72,6 @@ public:
   MemoryFile* make_file(const char* descriptive_name);
   void free_file(MemoryFile* file);
-  template<typename F>
-  void iterate_summary(F f) const {
-    for (int d = 0; d < _files.length(); d++) {
-      const MemoryFile* file = _files.at(d);
-      for (int i = 0; i < mt_number_of_tags; i++) {
-        f(NMTUtil::index_to_tag(i), file->_summary.by_type(NMTUtil::index_to_tag(i)));
-      }
-    }
-  }
   void summary_snapshot(VirtualMemorySnapshot* snapshot) const;
   // Print detailed report of file
@@ -111,11 +99,6 @@ public:
                       const NativeCallStack& stack, MemTag mem_tag);
   static void free_memory(MemoryFile* device, size_t offset, size_t size);
-  template<typename F>
-  static void iterate_summary(F f) {
-    _tracker->iterate_summary(f);
-  };
   static void summary_snapshot(VirtualMemorySnapshot* snapshot);
   static void print_report_on(const MemoryFile* device, outputStream* stream, size_t scale);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,11 @@ class NMTDCmd: public DCmdWithParser {
   static const char* impact() {
     return "Medium";
   }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", nullptr};
+    return p;
+  }
   virtual void execute(DCmdSource source, TRAPS);
 private:

View File

@@ -24,7 +24,6 @@
 #include "precompiled.hpp"
 #include "nmt/mallocTracker.hpp"
 #include "nmt/memoryFileTracker.hpp"
-#include "nmt/nmtCommon.hpp"
 #include "nmt/nmtUsage.hpp"
 #include "nmt/threadStackTracker.hpp"
@@ -91,16 +90,6 @@ void NMTUsage::update_vm_usage() {
     _vm_total.reserved += vm->reserved();
     _vm_total.committed += vm->committed();
   }
-  { // MemoryFileTracker addition
-    using MFT = MemoryFileTracker::Instance;
-    MFT::Locker lock;
-    MFT::iterate_summary([&](MemTag tag, const VirtualMemory* vm) {
-      int i = NMTUtil::tag_to_index(tag);
-      _vm_by_type[i].committed += vm->committed();
-      _vm_total.committed += vm->committed();
-    });
-  }
 }
 void NMTUsage::refresh() {

View File

@@ -26,7 +26,6 @@
 #define SHARE_NMT_NMTUSAGE_HPP
 #include "memory/allocation.hpp"
 #include "nmt/memTag.hpp"
-#include "utilities/globalDefinitions.hpp"
 struct NMTUsagePair {

View File

@@ -428,7 +428,7 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
   assert(is_shared(), "should always be set for shared constant pools");
   if (is_for_method_handle_intrinsic()) {
     // See the same check in remove_unshareable_info() below.
-    assert(cache() == nullptr, "must not have cpCache");
+    assert(cache() == NULL, "must not have cpCache");
     return;
   }
   assert(_cache != nullptr, "constant pool _cache should not be null");
@@ -474,7 +474,7 @@ void ConstantPool::remove_unshareable_info() {
   // This CP was created by Method::make_method_handle_intrinsic() and has nothing
   // that need to be removed/restored. It has no cpCache since the intrinsic methods
   // don't have any bytecodes.
-  assert(cache() == nullptr, "must not have cpCache");
+  assert(cache() == NULL, "must not have cpCache");
   return;
 }
@@ -1266,7 +1266,6 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
                     cp_index,
                     callee->is_interface() ? "CONSTANT_MethodRef" : "CONSTANT_InterfaceMethodRef",
                     callee->is_interface() ? "CONSTANT_InterfaceMethodRef" : "CONSTANT_MethodRef");
-    // Names are all known to be < 64k so we know this formatted message is not excessively large.
     Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IncompatibleClassChangeError(), "%s", ss.as_string());
     save_and_throw_exception(this_cp, cp_index, tag, CHECK_NULL);
   }

View File

@@ -829,15 +829,9 @@ oop ConstantPoolCache::appendix_if_resolved(ResolvedMethodEntry* method_entry) c
 void ConstantPoolCache::print_on(outputStream* st) const {
   st->print_cr("%s", internal_name());
   // print constant pool cache entries
-  if (_resolved_field_entries != nullptr) {
-    print_resolved_field_entries(st);
-  }
-  if (_resolved_method_entries != nullptr) {
-    print_resolved_method_entries(st);
-  }
-  if (_resolved_indy_entries != nullptr) {
-    print_resolved_indy_entries(st);
-  }
+  print_resolved_field_entries(st);
+  print_resolved_method_entries(st);
+  print_resolved_indy_entries(st);
 }
 void ConstantPoolCache::print_resolved_field_entries(outputStream* st) const {
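The null checks in the newer (left-hand) version let print_on() cope with a cache whose entry arrays were never allocated, as the guards themselves imply can happen. The guarded-print idiom in a generic standalone form; all types here are invented for illustration:

    #include <cstdio>

    struct Table { void print() const { std::puts("table"); } };

    struct Cache {
      Table* fields  = nullptr;  // allocated lazily; may legitimately stay null
      Table* methods = nullptr;

      void print_on() const {
        if (fields  != nullptr) { fields->print(); }   // guard each optional table
        if (methods != nullptr) { methods->print(); }
      }
    };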

View File

@@ -899,7 +899,6 @@ bool InstanceKlass::link_class_impl(TRAPS) {
       // if we are executing Java code. This is not a problem for CDS dumping phase since
       // it doesn't execute any Java code.
       ResourceMark rm(THREAD);
-      // Names are all known to be < 64k so we know this formatted message is not excessively large.
       Exceptions::fthrow(THREAD_AND_LOCATION,
                          vmSymbols::java_lang_NoClassDefFoundError(),
                          "Class %s, or one of its supertypes, failed class initialization",
@@ -920,7 +919,6 @@ bool InstanceKlass::link_class_impl(TRAPS) {
     if (super_klass != nullptr) {
       if (super_klass->is_interface()) {  // check if super class is an interface
         ResourceMark rm(THREAD);
-        // Names are all known to be < 64k so we know this formatted message is not excessively large.
         Exceptions::fthrow(
           THREAD_AND_LOCATION,
           vmSymbols::java_lang_IncompatibleClassChangeError(),
@@ -3288,7 +3286,6 @@ InstanceKlass* InstanceKlass::compute_enclosing_class(bool* inner_is_member, TRA
     // If the outer class is not an instance klass then it cannot have
     // declared any inner classes.
     ResourceMark rm(THREAD);
-    // Names are all known to be < 64k so we know this formatted message is not excessively large.
     Exceptions::fthrow(
       THREAD_AND_LOCATION,
       vmSymbols::java_lang_IncompatibleClassChangeError(),

View File

@@ -45,6 +45,7 @@
 class ConstantPool;
 class DeoptimizationScope;
 class klassItable;
+class Monitor;
 class RecordComponent;
 // An InstanceKlass is the VM level representation of a Java class.
@@ -67,6 +68,7 @@ class ClassFileStream;
 class KlassDepChange;
 class DependencyContext;
 class fieldDescriptor;
+class jniIdMapBase;
 class JNIid;
 class JvmtiCachedClassFieldMap;
 class nmethodBucket;

View File

@@ -25,6 +25,8 @@
 #ifndef SHARE_OOPS_KLASS_HPP
 #define SHARE_OOPS_KLASS_HPP
 #include "memory/iterator.hpp"
+#include "memory/memRegion.hpp"
 #include "oops/klassFlags.hpp"
 #include "oops/markWord.hpp"
 #include "oops/metadata.hpp"
@@ -58,6 +60,8 @@ class fieldDescriptor;
 class klassVtable;
 class ModuleEntry;
 class PackageEntry;
+class ParCompactionManager;
+class PSPromotionManager;
 class vtableEntry;
 class Klass : public Metadata {

View File

@@ -335,8 +335,7 @@ int Method::bci_from(address bcp) const {
 int Method::validate_bci(int bci) const {
-  // Called from the verifier, and should return -1 if not valid.
-  return ((is_native() && bci == 0) || (!is_native() && 0 <= bci && bci < code_size())) ? bci : -1;
+  return (bci == 0 || bci < code_size()) ? bci : -1;
 }
 // Return bci if it appears to be a valid bcp
View File

@@ -32,6 +32,7 @@
 #include "oops/methodFlags.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
+#include "oops/typeArrayOop.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/align.hpp"
 #include "utilities/growableArray.hpp"

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -181,11 +181,10 @@ int Block::is_Empty() const {
     return success_result;
   }
-  // Ideal nodes (except BoxLock) are allowable in empty blocks: skip them. Only
-  // Mach and BoxLock nodes turn directly into code via emit().
-  while ((end_idx > 0) &&
-         !get_node(end_idx)->is_Mach() &&
-         !get_node(end_idx)->is_BoxLock()) {
+  // Ideal nodes are allowable in empty blocks: skip them Only MachNodes
+  // turn directly into code, because only MachNodes have non-trivial
+  // emit() functions.
+  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
     end_idx--;
   }
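The loop in both variants is a backwards scan that ignores trailing nodes emitting no machine code when deciding whether a block is empty; the newer form simply adds BoxLock to the set of code-emitting nodes. A simplified standalone sketch of that scan; Node here is an invented stand-in with boolean flags instead of HotSpot's class hierarchy:

    #include <vector>

    struct Node {
      bool is_mach;      // turns directly into code via emit()
      bool is_box_lock;  // BoxLock also emits code in the newer scheme
    };

    // Scan backwards past trailing non-emitting nodes; stops at index 0
    // regardless, mirroring the (end_idx > 0) guard in the original.
    static int last_emitting_index(const std::vector<Node>& block) {
      int end_idx = (int)block.size() - 1;
      while (end_idx > 0 && !block[end_idx].is_mach && !block[end_idx].is_box_lock) {
        end_idx--;
      }
      return end_idx;
    }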

View File

@@ -355,12 +355,6 @@
   product(bool, SuperWordReductions, true,                                  \
           "Enable reductions support in superword.")                        \
                                                                             \
-  product_pd(uint, SuperWordStoreToLoadForwardingFailureDetection, DIAGNOSTIC, \
-          "if >0, auto-vectorization detects possible store-to-load "      \
-          "forwarding failures. The number specifies over how many "       \
-          "loop iterations this detection spans.")                         \
-          range(0, 4096)                                                   \
-                                                                            \
   product(bool, UseCMoveUnconditionally, false,                             \
           "Use CMove (scalar and vector) ignoring profitability test.")     \
                                                                             \

View File

@@ -956,6 +956,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
   // Each entry is reg_pressure_per_value,number_of_regs
   //          RegL  RegI  RegFlags  RegF  RegD  INTPRESSURE  FLOATPRESSURE
   // IA32        2     1         1     1     1            6              6
+  // IA64        1     1         1     1     1           50             41
   // SPARC       2     2         2     2     2      48 (24)        52 (26)
   // SPARCV9     2     2         2     2     2      48 (24)        52 (26)
   // AMD64       1     1         1     1     1           14             15

View File

@@ -4335,9 +4335,6 @@ Compile::TracePhase::TracePhase(PhaseTraceId id)
 Compile::TracePhase::~TracePhase() {
   if (_compile->failing_internal()) {
-    if (_log != nullptr) {
-      _log->done("phase");
-    }
     return; // timing code, not stressing bailouts.
   }
 #ifdef ASSERT

View File

@@ -224,6 +224,10 @@ void GraphKit::gen_stub(address C_function,
   store_to_memory(control(), adr_sp, null(), T_ADDRESS, MemNode::unordered);
   // Clear last_Java_pc
   store_to_memory(control(), adr_last_Java_pc, null(), T_ADDRESS, MemNode::unordered);
+#if (defined(IA64) && !defined(AIX))
+  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
+  store_to_memory(control(), adr_last_Java_fp, null(), T_ADDRESS, MemNode::unordered);
+#endif
   // For is-fancy-jump, the C-return value is also the branch target
   Node* target = map()->in(TypeFunc::Parms);

Some files were not shown because too many files have changed in this diff.