From 44ec501a41f4794259dd03cd168838e79334890e Mon Sep 17 00:00:00 2001 From: Roman Kennke Date: Fri, 8 Nov 2024 17:21:39 +0000 Subject: [PATCH] 8305895: Implement JEP 450: Compact Object Headers (Experimental) Co-authored-by: Sandhya Viswanathan Co-authored-by: Martin Doerr Co-authored-by: Hamlin Li Co-authored-by: Thomas Stuefe Co-authored-by: Amit Kumar Co-authored-by: Stefan Karlsson Co-authored-by: Coleen Phillimore Co-authored-by: Axel Boldt-Christmas Reviewed-by: coleenp, stefank, stuefe, phh, ihse, lmesnik, tschatzl, matsaave, rcastanedalo, vpaprotski, yzheng, egahlin --- make/Images.gmk | 20 +- make/autoconf/configure.ac | 1 + make/autoconf/jdk-options.m4 | 31 ++ make/autoconf/spec.gmk.template | 1 + src/hotspot/cpu/aarch64/aarch64.ad | 20 +- .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp | 47 +- .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp | 20 +- .../cpu/aarch64/c2_MacroAssembler_aarch64.cpp | 9 + .../cpu/aarch64/c2_MacroAssembler_aarch64.hpp | 2 + .../cpu/aarch64/compressedKlass_aarch64.cpp | 16 - .../cpu/aarch64/macroAssembler_aarch64.cpp | 77 +++- .../cpu/aarch64/macroAssembler_aarch64.hpp | 4 +- .../cpu/aarch64/templateTable_aarch64.cpp | 19 +- src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp | 58 +-- src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp | 15 +- src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp | 9 + src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp | 2 + src/hotspot/cpu/ppc/macroAssembler_ppc.cpp | 62 ++- src/hotspot/cpu/ppc/macroAssembler_ppc.hpp | 3 + src/hotspot/cpu/ppc/ppc.ad | 15 + src/hotspot/cpu/ppc/templateTable_ppc_64.cpp | 20 +- .../riscv/c1_LIRAssembler_arraycopy_riscv.cpp | 27 +- .../cpu/riscv/c1_LIRAssembler_riscv.cpp | 7 +- .../cpu/riscv/c1_MacroAssembler_riscv.cpp | 22 +- .../cpu/riscv/c2_MacroAssembler_riscv.cpp | 10 + .../cpu/riscv/c2_MacroAssembler_riscv.hpp | 2 + .../cpu/riscv/compressedKlass_riscv.cpp | 4 +- .../cpu/riscv/macroAssembler_riscv.cpp | 58 ++- .../cpu/riscv/macroAssembler_riscv.hpp | 3 +- src/hotspot/cpu/riscv/riscv.ad | 16 + src/hotspot/cpu/riscv/templateTable_riscv.cpp | 27 +- src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp | 27 +- .../cpu/s390/c1_MacroAssembler_s390.cpp | 14 +- .../cpu/s390/c2_MacroAssembler_s390.cpp | 7 + .../cpu/s390/c2_MacroAssembler_s390.hpp | 2 + src/hotspot/cpu/s390/macroAssembler_s390.cpp | 82 +++- src/hotspot/cpu/s390/macroAssembler_s390.hpp | 7 + src/hotspot/cpu/s390/s390.ad | 16 + src/hotspot/cpu/s390/templateTable_s390.cpp | 27 +- src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp | 30 +- src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 16 +- src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp | 10 + src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp | 4 + .../x86/c2_stubGenerator_x86_64_string.cpp | 178 +++++--- src/hotspot/cpu/x86/macroAssembler_x86.cpp | 101 ++++- src/hotspot/cpu/x86/macroAssembler_x86.hpp | 11 + src/hotspot/cpu/x86/matcher_x86.hpp | 2 +- src/hotspot/cpu/x86/stubRoutines_x86.hpp | 2 +- src/hotspot/cpu/x86/templateTable_x86.cpp | 31 +- src/hotspot/cpu/x86/x86_64.ad | 15 + src/hotspot/share/cds/archiveBuilder.cpp | 56 ++- src/hotspot/share/cds/archiveBuilder.hpp | 30 +- src/hotspot/share/cds/archiveHeapWriter.cpp | 28 +- src/hotspot/share/cds/archiveHeapWriter.hpp | 11 - src/hotspot/share/cds/archiveUtils.cpp | 9 +- src/hotspot/share/cds/archiveUtils.hpp | 5 +- src/hotspot/share/cds/cdsConfig.cpp | 21 +- src/hotspot/share/cds/dumpAllocStats.cpp | 12 + src/hotspot/share/cds/dumpAllocStats.hpp | 3 + src/hotspot/share/cds/filemap.cpp | 73 +++- src/hotspot/share/cds/filemap.hpp | 6 + src/hotspot/share/cds/metaspaceShared.cpp 
| 35 +- src/hotspot/share/ci/ciKlass.cpp | 20 + src/hotspot/share/ci/ciKlass.hpp | 3 + .../share/classfile/classFileParser.cpp | 9 + .../share/classfile/classFileParser.hpp | 4 + .../classfile/systemDictionaryShared.cpp | 16 +- src/hotspot/share/gc/g1/g1Arguments.cpp | 2 + src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 4 +- .../share/gc/g1/g1FullGCCompactTask.cpp | 13 +- .../share/gc/g1/g1FullGCCompactionPoint.cpp | 13 +- .../gc/g1/g1FullGCOopClosures.inline.hpp | 7 +- .../gc/g1/g1FullGCPrepareTask.inline.hpp | 7 +- src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp | 4 +- src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp | 3 +- .../share/gc/g1/g1OopClosures.inline.hpp | 2 +- .../share/gc/g1/g1ParScanThreadState.cpp | 41 +- .../share/gc/g1/g1ParScanThreadState.hpp | 11 +- src/hotspot/share/gc/g1/g1YoungCollector.cpp | 1 - .../gc/g1/g1YoungGCPostEvacuateTasks.cpp | 27 +- .../gc/g1/g1YoungGCPostEvacuateTasks.hpp | 4 +- .../share/gc/parallel/mutableSpace.cpp | 15 +- .../share/gc/parallel/parallelArguments.cpp | 4 +- .../gc/parallel/parallelScavengeHeap.cpp | 3 + .../share/gc/parallel/psParallelCompact.cpp | 22 +- .../gc/parallel/psParallelCompact.inline.hpp | 5 +- .../share/gc/parallel/psPromotionManager.cpp | 3 +- .../share/gc/parallel/psPromotionManager.hpp | 2 +- .../gc/parallel/psPromotionManager.inline.hpp | 30 +- .../share/gc/serial/defNewGeneration.cpp | 23 +- .../share/gc/serial/defNewGeneration.hpp | 6 - .../share/gc/serial/serialArguments.cpp | 6 + .../share/gc/serial/serialArguments.hpp | 1 + src/hotspot/share/gc/serial/serialFullGC.cpp | 15 +- src/hotspot/share/gc/serial/serialHeap.cpp | 3 + .../share/gc/shared/c2/barrierSetC2.cpp | 5 +- src/hotspot/share/gc/shared/collectedHeap.cpp | 22 +- src/hotspot/share/gc/shared/collectedHeap.hpp | 2 +- .../share/gc/shared/fullGCForwarding.cpp | 57 +++ .../share/gc/shared/fullGCForwarding.hpp | 59 +++ .../gc/shared/fullGCForwarding.inline.hpp | 60 +++ src/hotspot/share/gc/shared/memAllocator.cpp | 13 +- .../share/gc/shared/preservedMarks.cpp | 7 +- .../gc/shenandoah/shenandoahArguments.cpp | 6 + .../gc/shenandoah/shenandoahArguments.hpp | 1 + .../share/gc/shenandoah/shenandoahAsserts.cpp | 4 +- .../gc/shenandoah/shenandoahForwarding.hpp | 2 + .../shenandoahForwarding.inline.hpp | 17 + .../share/gc/shenandoah/shenandoahFullGC.cpp | 17 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 5 +- .../gc/shenandoah/shenandoahHeap.inline.hpp | 2 +- .../gc/shenandoah/shenandoahVerifier.cpp | 22 +- src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp | 2 +- src/hotspot/share/gc/z/zObjArrayAllocator.cpp | 14 +- .../interpreter/zero/bytecodeInterpreter.cpp | 11 +- .../chains/objectSampleMarker.hpp | 4 +- .../jfr/support/jfrObjectAllocationSample.cpp | 8 +- src/hotspot/share/jvmci/jvmciCompilerToVM.hpp | 3 + .../share/jvmci/jvmciCompilerToVMInit.cpp | 5 + src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 6 +- .../share/memory/classLoaderMetaspace.cpp | 72 ++- .../share/memory/classLoaderMetaspace.hpp | 11 +- src/hotspot/share/memory/metaspace.cpp | 37 +- src/hotspot/share/memory/metaspace.hpp | 13 +- .../share/memory/metaspace/binList.hpp | 26 +- .../share/memory/metaspace/blockTree.cpp | 12 +- .../share/memory/metaspace/blockTree.hpp | 31 +- .../share/memory/metaspace/freeBlocks.cpp | 31 +- .../share/memory/metaspace/freeBlocks.hpp | 12 +- .../share/memory/metaspace/metablock.hpp | 76 ++++ .../memory/metaspace/metablock.inline.hpp | 87 ++++ .../share/memory/metaspace/metaspaceArena.cpp | 224 ++++++---- .../share/memory/metaspace/metaspaceArena.hpp | 49 ++- 
.../memory/metaspace/metaspaceCommon.hpp | 6 +- .../memory/metaspace/metaspaceContext.cpp | 14 +- .../memory/metaspace/metaspaceContext.hpp | 12 +- .../memory/metaspace/metaspaceReporter.cpp | 6 +- .../memory/metaspace/metaspaceStatistics.cpp | 6 +- .../memory/metaspace/runningCounters.cpp | 12 +- .../memory/metaspace/runningCounters.hpp | 14 +- .../share/memory/metaspace/testHelpers.cpp | 27 +- .../share/memory/metaspace/testHelpers.hpp | 10 +- src/hotspot/share/memory/universe.cpp | 9 +- src/hotspot/share/oops/arrayOop.hpp | 8 +- src/hotspot/share/oops/compressedKlass.cpp | 235 ++++++++-- src/hotspot/share/oops/compressedKlass.hpp | 119 ++++- .../share/oops/compressedKlass.inline.hpp | 69 +-- src/hotspot/share/oops/instanceKlass.cpp | 7 +- src/hotspot/share/oops/instanceOop.hpp | 13 +- src/hotspot/share/oops/klass.cpp | 34 +- src/hotspot/share/oops/klass.hpp | 10 + src/hotspot/share/oops/klass.inline.hpp | 27 ++ src/hotspot/share/oops/markWord.cpp | 6 + src/hotspot/share/oops/markWord.hpp | 60 ++- src/hotspot/share/oops/markWord.inline.hpp | 81 ++++ src/hotspot/share/oops/objArrayKlass.cpp | 5 +- .../share/oops/objArrayKlass.inline.hpp | 4 +- src/hotspot/share/oops/oop.cpp | 5 +- src/hotspot/share/oops/oop.hpp | 52 ++- src/hotspot/share/oops/oop.inline.hpp | 90 +++- src/hotspot/share/oops/typeArrayKlass.cpp | 5 +- src/hotspot/share/opto/callnode.cpp | 10 +- src/hotspot/share/opto/compile.cpp | 4 + src/hotspot/share/opto/lcm.cpp | 4 +- src/hotspot/share/opto/library_call.cpp | 1 - src/hotspot/share/opto/macro.cpp | 4 +- src/hotspot/share/opto/memnode.cpp | 17 +- src/hotspot/share/runtime/arguments.cpp | 47 +- src/hotspot/share/runtime/arguments.hpp | 3 +- src/hotspot/share/runtime/globals.hpp | 4 + src/hotspot/share/runtime/vmStructs.cpp | 3 +- .../jvm/hotspot/debugger/DebuggerBase.java | 13 +- .../classes/sun/jvm/hotspot/oops/Array.java | 4 +- .../sun/jvm/hotspot/oops/Instance.java | 4 +- .../classes/sun/jvm/hotspot/oops/Mark.java | 13 + .../classes/sun/jvm/hotspot/oops/Oop.java | 32 +- .../classes/sun/jvm/hotspot/runtime/VM.java | 10 + .../utilities/RobustOopDeterminator.java | 27 +- .../jdk/vm/ci/hotspot/HotSpotVMConfig.java | 2 +- .../hotspot/SharedLibraryJVMCIReflection.java | 6 +- .../gtest/gc/shared/test_preservedMarks.cpp | 69 ++- .../metaspace/metaspaceGtestContexts.hpp | 8 +- test/hotspot/gtest/metaspace/test_binlist.cpp | 33 +- .../gtest/metaspace/test_blocktree.cpp | 31 +- test/hotspot/gtest/metaspace/test_clms.cpp | 409 ++++++++++++++++++ .../gtest/metaspace/test_freeblocks.cpp | 189 +------- .../gtest/metaspace/test_metablock.cpp | 97 +++++ .../gtest/metaspace/test_metaspaceUtils.cpp | 14 +- .../gtest/metaspace/test_metaspacearena.cpp | 289 ++++++++++--- .../metaspace/test_metaspacearena_stress.cpp | 41 +- test/hotspot/gtest/oops/test_arrayOop.cpp | 18 +- .../gtest/oops/test_compressedKlass.cpp | 35 +- test/hotspot/gtest/oops/test_objArrayOop.cpp | 41 +- test/hotspot/gtest/oops/test_typeArrayOop.cpp | 6 +- test/hotspot/jtreg/ProblemList.txt | 4 + .../TestVectorizationMismatchedAccess.java | 24 + .../c2/irTests/TestVectorizationNotRun.java | 4 +- .../intrinsics/bmi/BMITestRunner.java | 6 +- .../lib/ir_framework/TestFramework.java | 3 +- .../loopopts/superword/TestAlignVector.java | 9 +- .../loopopts/superword/TestMulAddS2I.java | 20 +- .../runner/LoopCombinedOpTest.java | 6 +- .../jtreg/gc/g1/TestGCLogMessages.java | 3 +- .../jtreg/gc/g1/plab/TestPLABPromotion.java | 7 +- .../jtreg/gtest/CompressedKlassGtest.java | 23 +- test/hotspot/jtreg/gtest/MetaspaceGtests.java | 11 + 
.../jtreg/gtest/MetaspaceUtilsGtests.java | 40 -- ...essedCPUSpecificClassSpaceReservation.java | 3 +- .../CompressedClassPointers.java | 18 +- ...CompressedClassPointersEncodingScheme.java | 54 ++- .../runtime/FieldLayout/ArrayBaseOffsets.java | 113 ----- .../runtime/FieldLayout/BaseOffsets.java | 157 +++++++ .../cds/TestDefaultArchiveLoading.java | 120 +++++ .../runtime/cds/appcds/TestZGCWithCDS.java | 21 +- ...toCreateSharedArchiveNoDefaultArchive.java | 3 +- .../serviceability/sa/ClhsdbLongConstant.java | 6 +- .../GetObjectSizeIntrinsicsTest.java | 17 +- .../tools/jlink/plugins/CDSPluginTest.java | 17 +- 218 files changed, 4353 insertions(+), 1632 deletions(-) create mode 100644 src/hotspot/share/gc/shared/fullGCForwarding.cpp create mode 100644 src/hotspot/share/gc/shared/fullGCForwarding.hpp create mode 100644 src/hotspot/share/gc/shared/fullGCForwarding.inline.hpp create mode 100644 src/hotspot/share/memory/metaspace/metablock.hpp create mode 100644 src/hotspot/share/memory/metaspace/metablock.inline.hpp create mode 100644 src/hotspot/share/oops/markWord.inline.hpp create mode 100644 test/hotspot/gtest/metaspace/test_clms.cpp create mode 100644 test/hotspot/gtest/metaspace/test_metablock.cpp create mode 100644 test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java create mode 100644 test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java diff --git a/make/Images.gmk b/make/Images.gmk index 5703a74afa5..10fc8041325 100644 --- a/make/Images.gmk +++ b/make/Images.gmk @@ -132,10 +132,16 @@ CDS_DUMP_FLAGS = -Xmx128M -Xms128M # Helper function for creating the CDS archives for the JDK and JRE # # Param1 - VM variant (e.g., server, client, zero, ...) -# Param2 - _nocoops, or empty +# Param2 - _nocoops, _coh, _nocoops_coh, or empty define CreateCDSArchive - $1_$2_DUMP_EXTRA_ARG := $(if $(filter _nocoops, $2), -XX:-UseCompressedOops, ) - $1_$2_DUMP_TYPE := $(if $(filter _nocoops, $2), -NOCOOPS, ) + $1_$2_COOPS_OPTION := $(if $(findstring _nocoops, $2),-XX:-UseCompressedOops) + # enable and also explicitly disable coh as needed. + ifeq ($(call isTargetCpuBits, 64), true) + $1_$2_COH_OPTION := -XX:+UnlockExperimentalVMOptions \ + $(if $(findstring _coh, $2),-XX:+UseCompactObjectHeaders,-XX:-UseCompactObjectHeaders) + endif + $1_$2_DUMP_EXTRA_ARG := $$($1_$2_COOPS_OPTION) $$($1_$2_COH_OPTION) + $1_$2_DUMP_TYPE := $(if $(findstring _nocoops, $2),-NOCOOPS,)$(if $(findstring _coh, $2),-COH,) # Only G1 supports dumping the shared heap, so explicitly use G1 if the JVM supports it. 
$1_$2_CDS_DUMP_FLAGS := $(CDS_DUMP_FLAGS) $(if $(filter g1gc, $(JVM_FEATURES_$1)), -XX:+UseG1GC) @@ -190,6 +196,14 @@ ifeq ($(BUILD_CDS_ARCHIVE), true) $(foreach v, $(JVM_VARIANTS), \ $(eval $(call CreateCDSArchive,$v,_nocoops)) \ ) + ifeq ($(BUILD_CDS_ARCHIVE_COH), true) + $(foreach v, $(JVM_VARIANTS), \ + $(eval $(call CreateCDSArchive,$v,_coh)) \ + ) + $(foreach v, $(JVM_VARIANTS), \ + $(eval $(call CreateCDSArchive,$v,_nocoops_coh)) \ + ) + endif endif endif diff --git a/make/autoconf/configure.ac b/make/autoconf/configure.ac index 21bf61a3fa1..66809127a75 100644 --- a/make/autoconf/configure.ac +++ b/make/autoconf/configure.ac @@ -261,6 +261,7 @@ JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST JDKOPT_EXCLUDE_TRANSLATIONS JDKOPT_ENABLE_DISABLE_MANPAGES JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE +JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH JDKOPT_ENABLE_DISABLE_COMPATIBLE_CDS_ALIGNMENT JDKOPT_SETUP_MACOSX_SIGNING diff --git a/make/autoconf/jdk-options.m4 b/make/autoconf/jdk-options.m4 index 11c55c78930..fec0a93161b 100644 --- a/make/autoconf/jdk-options.m4 +++ b/make/autoconf/jdk-options.m4 @@ -673,6 +673,37 @@ AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE], AC_SUBST(BUILD_CDS_ARCHIVE) ]) +################################################################################ +# +# Enable or disable the default CDS archive generation for Compact Object Headers +# +AC_DEFUN([JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE_COH], +[ + UTIL_ARG_ENABLE(NAME: cds-archive-coh, DEFAULT: auto, RESULT: BUILD_CDS_ARCHIVE_COH, + DESC: [enable generation of default CDS archives for compact object headers (requires --enable-cds-archive)], + DEFAULT_DESC: [auto], + CHECKING_MSG: [if default CDS archives for compact object headers should be generated], + CHECK_AVAILABLE: [ + AC_MSG_CHECKING([if CDS archive with compact object headers is available]) + if test "x$BUILD_CDS_ARCHIVE" = "xfalse"; then + AC_MSG_RESULT([no (CDS default archive generation is disabled)]) + AVAILABLE=false + elif test "x$OPENJDK_TARGET_CPU" != "xx86_64" && + test "x$OPENJDK_TARGET_CPU" != "xaarch64" && + test "x$OPENJDK_TARGET_CPU" != "xppc64" && + test "x$OPENJDK_TARGET_CPU" != "xppc64le" && + test "x$OPENJDK_TARGET_CPU" != "xriscv64" && + test "x$OPENJDK_TARGET_CPU" != "xs390x"; then + AC_MSG_RESULT([no (compact object headers not supported for this platform)]) + AVAILABLE=false + else + AC_MSG_RESULT([yes]) + AVAILABLE=true + fi + ]) + AC_SUBST(BUILD_CDS_ARCHIVE_COH) +]) + ################################################################################ # # Enable the alternative CDS core region alignment diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template index eb2b1a688e1..62afd6577ab 100644 --- a/make/autoconf/spec.gmk.template +++ b/make/autoconf/spec.gmk.template @@ -370,6 +370,7 @@ EXCLUDE_TRANSLATIONS := @EXCLUDE_TRANSLATIONS@ BUILD_MANPAGES := @BUILD_MANPAGES@ BUILD_CDS_ARCHIVE := @BUILD_CDS_ARCHIVE@ +BUILD_CDS_ARCHIVE_COH := @BUILD_CDS_ARCHIVE_COH@ ENABLE_COMPATIBLE_CDS_ALIGNMENT := @ENABLE_COMPATIBLE_CDS_ALIGNMENT@ diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index eedf29cc563..26ccd217fd0 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -5756,6 +5756,10 @@ opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indInde indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P); +opclass memory_noindex(indirect, + indOffI1, indOffL1,indOffI2, indOffL2, indOffI4, indOffL4, 
indOffI8, indOffL8, + indirectN, indOffIN, indOffLN, indirectX2P, indOffX2P); + // iRegIorL2I is used for src inputs in rules for 32 bit int (I) // operations. it allows the src to be either an iRegI or a (ConvL2I // iRegL). in the latter case the l2i normally planted for a ConvL2I @@ -6682,7 +6686,7 @@ instruct loadKlass(iRegPNoSp dst, memory8 mem) instruct loadNKlass(iRegNNoSp dst, memory4 mem) %{ match(Set dst (LoadNKlass mem)); - predicate(!needs_acquiring_load(n)); + predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders); ins_cost(4 * INSN_COST); format %{ "ldrw $dst, $mem\t# compressed class ptr" %} @@ -6692,6 +6696,20 @@ instruct loadNKlass(iRegNNoSp dst, memory4 mem) ins_pipe(iload_reg_mem); %} +instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory_noindex mem) +%{ + match(Set dst (LoadNKlass mem)); + predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders); + + ins_cost(4 * INSN_COST); + format %{ "load_narrow_klass_compact $dst, $mem\t# compressed class ptr" %} + ins_encode %{ + assert($mem$$index$$Register == noreg, "must not have indexed address"); + __ load_narrow_klass_compact_c2($dst$$Register, $mem$$base$$Register, $mem$$disp); + %} + ins_pipe(iload_reg_mem); +%} + // Load Float instruct loadF(vRegF dst, memory4 mem) %{ diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 3d1be91e9b2..b01360c3f7e 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -2243,8 +2243,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); - Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); // test for null if (flags & LIR_OpArrayCopy::src_null_check) { @@ -2305,15 +2303,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedClassPointers) { - __ ldrw(tmp, src_klass_addr); - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(tmp, src_klass_addr); - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klasses_from_objects(src, dst, tmp, rscratch1); __ br(Assembler::NE, *stub->entry()); } else { // For object arrays, if src is a sub class of dst then we can @@ -2435,36 +2425,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // but not necessarily exactly of type default_type. 
Label known_ok, halt; __ mov_metadata(tmp, default_type->constant_encoding()); - if (UseCompressedClassPointers) { - __ encode_klass_not_null(tmp); - } if (basic_type != T_OBJECT) { - - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(dst, tmp, rscratch1); __ br(Assembler::NE, halt); - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, src_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, src_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(src, tmp, rscratch1); __ br(Assembler::EQ, known_ok); } else { - if (UseCompressedClassPointers) { - __ ldrw(rscratch1, dst_klass_addr); - __ cmpw(tmp, rscratch1); - } else { - __ ldr(rscratch1, dst_klass_addr); - __ cmp(tmp, rscratch1); - } + __ cmp_klass(dst, tmp, rscratch1); __ br(Assembler::EQ, known_ok); __ cmp(src, dst); __ br(Assembler::EQ, known_ok); @@ -2547,12 +2515,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { add_debug_info_for_null_check_here(info); } - if (UseCompressedClassPointers) { - __ ldrw(result, Address (obj, oopDesc::klass_offset_in_bytes())); - __ decode_klass_not_null(result); - } else { - __ ldr(result, Address (obj, oopDesc::klass_offset_in_bytes())); - } + __ load_klass(result, obj); } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index 8d1b3902ce4..946471b51fd 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -175,15 +175,19 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len); - // This assumes that all prototype bits fit in an int32_t - mov(t1, (int32_t)(intptr_t)markWord::prototype().value()); - str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); - if (UseCompressedClassPointers) { // Take care not to kill klass - encode_klass_not_null(t1, klass); - strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + ldr(t1, Address(klass, Klass::prototype_header_offset())); + str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); } else { - str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + mov(t1, checked_cast<int32_t>(markWord::prototype().value())); + str(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseCompressedClassPointers) { // Take care not to kill klass + encode_klass_not_null(t1, klass); + strw(t1, Address(obj, oopDesc::klass_offset_in_bytes())); + } else { + str(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } } if (len->is_valid()) { @@ -194,7 +198,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // Clear gap/first 4 bytes following the length field.
strw(zr, Address(obj, base_offset)); } - } else if (UseCompressedClassPointers) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { store_klass_gap(obj, zr); } } diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp index 5ce76106c9e..a7bbb6ebe0f 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp @@ -2689,3 +2689,12 @@ bool C2_MacroAssembler::in_scratch_emit_size() { } return MacroAssembler::in_scratch_emit_size(); } + +void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Register obj, int disp) { + // Note: Don't clobber obj anywhere in that method! + + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. + ldr(dst, Address(obj, disp - oopDesc::klass_offset_in_bytes())); + lsr(dst, dst, markWord::klass_shift); +} diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp index d61b050407d..c6ddcf46cba 100644 --- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp @@ -186,4 +186,6 @@ void vector_signum_sve(FloatRegister dst, FloatRegister src, FloatRegister zero, FloatRegister one, FloatRegister vtmp, PRegister pgtmp, SIMD_RegVariant T); + void load_narrow_klass_compact_c2(Register dst, Register obj, int disp); + #endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp b/src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp index fc78813c161..b96241aab19 100644 --- a/src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp @@ -117,19 +117,3 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size return result; } - -void CompressedKlassPointers::initialize(address addr, size_t len) { - constexpr uintptr_t unscaled_max = nth_bit(32); - assert(len <= unscaled_max, "Klass range larger than 32 bits?"); - - // Shift is always 0 on aarch64. - _shift = 0; - - // On aarch64, we don't bother with zero-based encoding (base=0 shift>0). - address const end = addr + len; - _base = (end <= (address)unscaled_max) ? nullptr : addr; - - // Remember the Klass range: - _klass_range_start = addr; - _klass_range_end = addr + len; -} diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 9f35a6e75a7..99ae1f1d901 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -1002,10 +1002,11 @@ address MacroAssembler::ic_call(address entry, jint method_index) { } int MacroAssembler::ic_check_size() { + int extra_instructions = UseCompactObjectHeaders ? 
1 : 0; if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) { - return NativeInstruction::instruction_size * 7; + return NativeInstruction::instruction_size * (7 + extra_instructions); } else { - return NativeInstruction::instruction_size * 5; + return NativeInstruction::instruction_size * (5 + extra_instructions); } } @@ -1023,7 +1024,11 @@ int MacroAssembler::ic_check(int end_alignment) { int uep_offset = offset(); - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, receiver); + ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset())); + cmpw(tmp1, tmp2); + } else if (UseCompressedClassPointers) { ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset())); cmpw(tmp1, tmp2); @@ -5009,8 +5014,22 @@ void MacroAssembler::load_method_holder(Register holder, Register method) { ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* } +// Loads the obj's Klass* into dst. +// Preserves all registers (incl src, rscratch1 and rscratch2). +// Input: +// src - the oop we want to load the klass from. +// dst - output narrow klass. +void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); + ldr(dst, Address(src, oopDesc::mark_offset_in_bytes())); + lsr(dst, dst, markWord::klass_shift); +} + void MacroAssembler::load_klass(Register dst, Register src) { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(dst, src); + decode_klass_not_null(dst); + } else if (UseCompressedClassPointers) { ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst); } else { @@ -5065,28 +5084,50 @@ void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, R resolve_oop_handle(dst, tmp1, tmp2); } -void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) { +void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) { + assert_different_registers(obj, klass, tmp); if (UseCompressedClassPointers) { - ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp, obj); + } else { + ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); + } if (CompressedKlassPointers::base() == nullptr) { - cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift()); + cmp(klass, tmp, LSL, CompressedKlassPointers::shift()); return; } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 && CompressedKlassPointers::shift() == 0) { // Only the bottom 32 bits matter - cmpw(trial_klass, tmp); + cmpw(klass, tmp); return; } decode_klass_not_null(tmp); } else { - ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes())); + } + cmp(klass, tmp); +} + +void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, obj1); + load_narrow_klass_compact(tmp2, obj2); + cmpw(tmp1, tmp2); + } else if (UseCompressedClassPointers) { + ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes())); + cmpw(tmp1, tmp2); + } else { + ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + ldr(tmp2, Address(obj2, 
oopDesc::klass_offset_in_bytes())); + cmp(tmp1, tmp2); } - cmp(trial_klass, tmp); } void MacroAssembler::store_klass(Register dst, Register src) { // FIXME: Should this be a store release? concurrent gcs assumes // klass length is valid if klass field is not null. + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { encode_klass_not_null(src); strw(src, Address(dst, oopDesc::klass_offset_in_bytes())); @@ -5096,6 +5137,7 @@ void MacroAssembler::store_klass(Register dst, Register src) { } void MacroAssembler::store_klass_gap(Register dst, Register src) { + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { // Store to klass gap in destination strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); @@ -5246,9 +5288,6 @@ MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { return _klass_decode_mode; } - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() - || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); - if (CompressedKlassPointers::base() == nullptr) { return (_klass_decode_mode = KlassDecodeZero); } @@ -5274,7 +5313,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { switch (klass_decode_mode()) { case KlassDecodeZero: if (CompressedKlassPointers::shift() != 0) { - lsr(dst, src, LogKlassAlignmentInBytes); + lsr(dst, src, CompressedKlassPointers::shift()); } else { if (dst != src) mov(dst, src); } @@ -5283,7 +5322,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { case KlassDecodeXor: if (CompressedKlassPointers::shift() != 0) { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); - lsr(dst, dst, LogKlassAlignmentInBytes); + lsr(dst, dst, CompressedKlassPointers::shift()); } else { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); } @@ -5291,7 +5330,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { case KlassDecodeMovk: if (CompressedKlassPointers::shift() != 0) { - ubfx(dst, src, LogKlassAlignmentInBytes, 32); + ubfx(dst, src, CompressedKlassPointers::shift(), 32); } else { movw(dst, src); } @@ -5313,7 +5352,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { switch (klass_decode_mode()) { case KlassDecodeZero: if (CompressedKlassPointers::shift() != 0) { - lsl(dst, src, LogKlassAlignmentInBytes); + lsl(dst, src, CompressedKlassPointers::shift()); } else { if (dst != src) mov(dst, src); } @@ -5321,7 +5360,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { case KlassDecodeXor: if (CompressedKlassPointers::shift() != 0) { - lsl(dst, src, LogKlassAlignmentInBytes); + lsl(dst, src, CompressedKlassPointers::shift()); eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); } else { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); @@ -5336,7 +5375,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { movk(dst, shifted_base >> 32, 32); if (CompressedKlassPointers::shift() != 0) { - lsl(dst, dst, LogKlassAlignmentInBytes); + lsl(dst, dst, CompressedKlassPointers::shift()); } break; diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp index b23acc15718..2a1edb01b20 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp @@ -875,9 +875,11 @@ public: void load_method_holder(Register holder, Register method); // oop manipulations + void 
load_narrow_klass_compact(Register dst, Register src); void load_klass(Register dst, Register src); void store_klass(Register dst, Register src); - void cmp_klass(Register oop, Register trial_klass, Register tmp); + void cmp_klass(Register obj, Register klass, Register tmp); + void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2); void resolve_weak_handle(Register result, Register tmp1, Register tmp2); void resolve_oop_handle(Register result, Register tmp1, Register tmp2); diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index 48ff356f9a5..60d4c3c5110 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -3629,12 +3629,14 @@ void TemplateTable::_new() { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. - __ sub(r3, r3, sizeof(oopDesc)); + int header_size = oopDesc::header_size() * HeapWordSize; + assert(is_aligned(header_size, BytesPerLong), "oop header size must be 8-byte-aligned"); + __ sub(r3, r3, header_size); __ cbz(r3, initialize_header); // Initialize object fields { - __ add(r2, r0, sizeof(oopDesc)); + __ add(r2, r0, header_size); Label loop; __ bind(loop); __ str(zr, Address(__ post(r2, BytesPerLong))); @@ -3644,10 +3646,15 @@ void TemplateTable::_new() { // initialize object header only. __ bind(initialize_header); - __ mov(rscratch1, (intptr_t)markWord::prototype().value()); - __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); - __ store_klass_gap(r0, zr); // zero klass gap for compressed oops - __ store_klass(r0, r4); // store klass last + if (UseCompactObjectHeaders) { + __ ldr(rscratch1, Address(r4, Klass::prototype_header_offset())); + __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); + } else { + __ mov(rscratch1, (intptr_t)markWord::prototype().value()); + __ str(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes())); + __ store_klass_gap(r0, zr); // zero klass gap for compressed oops + __ store_klass(r0, r4); // store klass last + } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp index 36e1ac82334..b8a271ffc3b 100644 --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp @@ -1996,16 +1996,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible. if (basic_type != T_OBJECT) { // Simple test for basic type arrays. - if (UseCompressedClassPointers) { - // We don't need decode because we just need to compare. - __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src); - __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst); - __ cmpw(CCR0, tmp, tmp2); - } else { - __ ld(tmp, oopDesc::klass_offset_in_bytes(), src); - __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst); - __ cmpd(CCR0, tmp, tmp2); - } + __ cmp_klasses_from_objects(CCR0, src, dst, tmp, tmp2); __ beq(CCR0, cont); } else { // For object arrays, if src is a sub class of dst then we can @@ -2128,39 +2119,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // but not necessarily exactly of type default_type. Label known_ok, halt; metadata2reg(default_type->constant_encoding(), tmp); - if (UseCompressedClassPointers) { - // Tmp holds the default type. It currently comes uncompressed after the - // load of a constant, so encode it. 
- __ encode_klass_not_null(tmp); - // Load the raw value of the dst klass, since we will be comparing - // uncompressed values directly. - __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst); - __ cmpw(CCR0, tmp, tmp2); - if (basic_type != T_OBJECT) { - __ bne(CCR0, halt); - // Load the raw value of the src klass. - __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src); - __ cmpw(CCR0, tmp, tmp2); - __ beq(CCR0, known_ok); - } else { - __ beq(CCR0, known_ok); - __ cmpw(CCR0, src, dst); - __ beq(CCR0, known_ok); - } + __ cmp_klass(CCR0, dst, tmp, R11_scratch1, R12_scratch2); + if (basic_type != T_OBJECT) { + __ bne(CCR0, halt); + __ cmp_klass(CCR0, src, tmp, R11_scratch1, R12_scratch2); + __ beq(CCR0, known_ok); } else { - __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst); - __ cmpd(CCR0, tmp, tmp2); - if (basic_type != T_OBJECT) { - __ bne(CCR0, halt); - // Load the raw value of the src klass. - __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src); - __ cmpd(CCR0, tmp, tmp2); - __ beq(CCR0, known_ok); - } else { - __ beq(CCR0, known_ok); - __ cmpd(CCR0, src, dst); - __ beq(CCR0, known_ok); - } + __ beq(CCR0, known_ok); + __ cmpw(CCR0, src, dst); + __ beq(CCR0, known_ok); } __ bind(halt); __ stop("incorrect type information in arraycopy"); @@ -2738,12 +2705,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { } } - if (UseCompressedClassPointers) { - __ lwz(result, oopDesc::klass_offset_in_bytes(), obj); - __ decode_klass_not_null(result); - } else { - __ ld(result, oopDesc::klass_offset_in_bytes(), obj); - } + __ load_klass(result, obj); } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp index ea4d76e200f..0eddec09b63 100644 --- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp @@ -201,12 +201,19 @@ void C1_MacroAssembler::try_allocate( void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) { assert_different_registers(obj, klass, len, t1, t2); - load_const_optimized(t1, (intx)markWord::prototype().value()); - std(t1, oopDesc::mark_offset_in_bytes(), obj); - store_klass(obj, klass); + + if (UseCompactObjectHeaders) { + ld(t1, in_bytes(Klass::prototype_header_offset()), klass); + std(t1, oopDesc::mark_offset_in_bytes(), obj); + } else { + load_const_optimized(t1, (intx)markWord::prototype().value()); + std(t1, oopDesc::mark_offset_in_bytes(), obj); + store_klass(obj, klass); + } + if (len->is_valid()) { stw(len, arrayOopDesc::length_offset_in_bytes(), obj); - } else if (UseCompressedClassPointers) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { // Otherwise length is in the class gap. store_klass_gap(obj); } diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp index 1147c3b42b2..82d9a046bc6 100644 --- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.cpp @@ -47,6 +47,15 @@ void C2_MacroAssembler::fast_unlock_lightweight(ConditionRegister flag, Register compiler_fast_unlock_lightweight_object(flag, obj, box, tmp1, tmp2, tmp3); } +void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Register obj, int disp) { + // Note: Don't clobber obj anywhere in that method! + + // The incoming address is pointing into obj-start + klass_offset_in_bytes. 
We need to extract + // obj-start, so that we can load from the object's mark-word instead. + ld(dst, disp - oopDesc::klass_offset_in_bytes(), obj); + srdi(dst, dst, markWord::klass_shift); +} + // Intrinsics for CompactStrings // Compress char[] to byte[] by compressing 16 bytes at once. diff --git a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp index 5096810ef91..48a362aa63c 100644 --- a/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/c2_MacroAssembler_ppc.hpp @@ -34,6 +34,8 @@ void fast_unlock_lightweight(ConditionRegister flag, Register obj, Register box, Register tmp1, Register tmp2, Register tmp3); + void load_narrow_klass_compact_c2(Register dst, Register obj, int disp); + // Intrinsics for CompactStrings // Compress char[] to byte[] by compressing 16 bytes at once. void string_compress_16(Register src, Register dst, Register cnt, diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 190e0c39fd7..c585e03fdf5 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -1218,6 +1218,9 @@ int MacroAssembler::ic_check_size() { num_ins = 7; if (!implicit_null_checks_available) num_ins += 2; } + + if (UseCompactObjectHeaders) num_ins++; + return num_ins * BytesPerInstWord; } @@ -1245,7 +1248,9 @@ int MacroAssembler::ic_check(int end_alignment) { if (use_trap_based_null_check) { trap_null_check(receiver); } - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, receiver); + } else if (UseCompressedClassPointers) { lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver); } else { ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver); @@ -3258,6 +3263,7 @@ Register MacroAssembler::encode_klass_not_null(Register dst, Register src) { } void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) { + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { Register compressedKlass = encode_klass_not_null(ck, klass); stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop); @@ -3267,12 +3273,13 @@ void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) } void MacroAssembler::store_klass_gap(Register dst_oop, Register val) { + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { if (val == noreg) { val = R0; li(val, 0); } - stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed + stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); } } @@ -3313,15 +3320,60 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { } void MacroAssembler::load_klass(Register dst, Register src) { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(dst, src); + decode_klass_not_null(dst); + } else if (UseCompressedClassPointers) { lwz(dst, oopDesc::klass_offset_in_bytes(), src); - // Attention: no null check here! - decode_klass_not_null(dst, dst); + decode_klass_not_null(dst); } else { ld(dst, oopDesc::klass_offset_in_bytes(), src); } } +// Loads the obj's Klass* into dst. +// Preserves all registers (incl src, rscratch1 and rscratch2). +// Input: +// src - the oop we want to load the klass from. +// dst - output nklass. 
+void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); + ld(dst, oopDesc::mark_offset_in_bytes(), src); + srdi(dst, dst, markWord::klass_shift); +} + +void MacroAssembler::cmp_klass(ConditionRegister dst, Register obj, Register klass, Register tmp, Register tmp2) { + assert_different_registers(obj, klass, tmp); + if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp, obj); + } else { + lwz(tmp, oopDesc::klass_offset_in_bytes(), obj); + } + Register encoded_klass = encode_klass_not_null(tmp2, klass); + cmpw(dst, tmp, encoded_klass); + } else { + ld(tmp, oopDesc::klass_offset_in_bytes(), obj); + cmpd(dst, tmp, klass); + } +} + +void MacroAssembler::cmp_klasses_from_objects(ConditionRegister dst, Register obj1, Register obj2, Register tmp1, Register tmp2) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, obj1); + load_narrow_klass_compact(tmp2, obj2); + cmpw(dst, tmp1, tmp2); + } else if (UseCompressedClassPointers) { + lwz(tmp1, oopDesc::klass_offset_in_bytes(), obj1); + lwz(tmp2, oopDesc::klass_offset_in_bytes(), obj2); + cmpw(dst, tmp1, tmp2); + } else { + ld(tmp1, oopDesc::klass_offset_in_bytes(), obj1); + ld(tmp2, oopDesc::klass_offset_in_bytes(), obj2); + cmpd(dst, tmp1, tmp2); + } +} + void MacroAssembler::load_klass_check_null(Register dst, Register src, Label* is_null) { null_check(src, oopDesc::klass_offset_in_bytes(), is_null); load_klass(dst, src); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp index f0e7c644535..078efd509ca 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp @@ -757,6 +757,9 @@ class MacroAssembler: public Assembler { // Load/Store klass oop from klass field. Compress. void load_klass(Register dst, Register src); + void load_narrow_klass_compact(Register dst, Register src); + void cmp_klass(ConditionRegister dst, Register obj, Register klass, Register tmp, Register tmp2); + void cmp_klasses_from_objects(ConditionRegister dst, Register obj1, Register obj2, Register tmp1, Register tmp2); void load_klass_check_null(Register dst, Register src, Label* is_null = nullptr); void store_klass(Register dst_oop, Register klass, Register tmp = R0); void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified. diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index 6d3daa025e8..142eaea26e4 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -5496,6 +5496,7 @@ instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{ // Load compressed klass pointer. 
instruct loadNKlass(iRegNdst dst, memory mem) %{ match(Set dst (LoadNKlass mem)); + predicate(!UseCompactObjectHeaders); ins_cost(MEMORY_REF_COST); format %{ "LWZ $dst, $mem \t// compressed klass ptr" %} @@ -5504,6 +5505,20 @@ instruct loadNKlass(iRegNdst dst, memory mem) %{ ins_pipe(pipe_class_memory); %} +instruct loadNKlassCompactHeaders(iRegNdst dst, memory mem) %{ + match(Set dst (LoadNKlass mem)); + predicate(UseCompactObjectHeaders); + ins_cost(MEMORY_REF_COST); + + format %{ "load_narrow_klass_compact $dst, $mem \t// compressed class ptr" %} + size(8); + ins_encode %{ + assert($mem$$index$$Register == R0, "must not have indexed address: %s[%s]", $mem$$base$$Register.name(), $mem$$index$$Register.name()); + __ load_narrow_klass_compact_c2($dst$$Register, $mem$$base$$Register, $mem$$disp); + %} + ins_pipe(pipe_class_memory); +%} + // Load Klass Pointer instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{ match(Set dst (LoadKlass mem)); diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp index a55f30eb67d..0e88b2d3eb4 100644 --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp @@ -3840,8 +3840,9 @@ void TemplateTable::_new() { // Init1: Zero out newly allocated memory. // Initialize remaining object fields. Register Rbase = Rtags; - __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc)); - __ addi(Rbase, RallocatedObject, sizeof(oopDesc)); + int header_size = oopDesc::header_size() * HeapWordSize; + __ addi(Rinstance_size, Rinstance_size, 7 - header_size); + __ addi(Rbase, RallocatedObject, header_size); __ srdi(Rinstance_size, Rinstance_size, 3); // Clear out object skipping header. Takes also care of the zero length case. @@ -3851,12 +3852,15 @@ void TemplateTable::_new() { // -------------------------------------------------------------------------- // Init2: Initialize the header: mark, klass // Init mark. - __ load_const_optimized(Rscratch, markWord::prototype().value(), R0); - __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject); - - // Init klass. - __ store_klass_gap(RallocatedObject); - __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms) + if (UseCompactObjectHeaders) { + __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass); + __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject); + } else { + __ load_const_optimized(Rscratch, markWord::prototype().value(), R0); + __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject); + __ store_klass_gap(RallocatedObject); + __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); + } // Check and trigger dtrace event. 
if (DTraceAllocProbes) { diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp index a45add1032a..7d673383cad 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp @@ -194,7 +194,10 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + __ load_narrow_klass_compact(tmp, src); + __ load_narrow_klass_compact(t0, dst); + } else if (UseCompressedClassPointers) { __ lwu(tmp, Address(src, oopDesc::klass_offset_in_bytes())); __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); } else { @@ -244,7 +247,6 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, ciArrayKlass *default_type, int flags) { assert(default_type != nullptr, "null default_type!"); BasicType basic_type = default_type->element_type()->basic_type(); - if (basic_type == T_ARRAY) { basic_type = T_OBJECT; } if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { // Sanity check the known type with the incoming class. For the @@ -261,25 +263,10 @@ void LIR_Assembler::arraycopy_assert(Register src, Register dst, Register tmp, c } if (basic_type != T_OBJECT) { - if (UseCompressedClassPointers) { - __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); - } else { - __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); - } - __ bne(tmp, t0, halt); - if (UseCompressedClassPointers) { - __ lwu(t0, Address(src, oopDesc::klass_offset_in_bytes())); - } else { - __ ld(t0, Address(src, oopDesc::klass_offset_in_bytes())); - } - __ beq(tmp, t0, known_ok); + __ cmp_klass_compressed(dst, tmp, t0, halt, false); + __ cmp_klass_compressed(src, tmp, t0, known_ok, true); } else { - if (UseCompressedClassPointers) { - __ lwu(t0, Address(dst, oopDesc::klass_offset_in_bytes())); - } else { - __ ld(t0, Address(dst, oopDesc::klass_offset_in_bytes())); - } - __ beq(tmp, t0, known_ok); + __ cmp_klass_compressed(dst, tmp, t0, known_ok, true); __ beq(src, dst, known_ok); } __ bind(halt); diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index 542f8d8ba58..9f8276d5765 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -1518,12 +1518,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { add_debug_info_for_null_check_here(info); } - if (UseCompressedClassPointers) { - __ lwu(result, Address(obj, oopDesc::klass_offset_in_bytes())); - __ decode_klass_not_null(result); - } else { - __ ld(result, Address(obj, oopDesc::klass_offset_in_bytes())); - } + __ load_klass(result, obj); } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 1e4b66069ee..dd35cce4cab 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -164,15 +164,19 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) { 
assert_different_registers(obj, klass, len, tmp1, tmp2); - // This assumes that all prototype bits fitr in an int32_t - mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value()); - sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes())); - - if (UseCompressedClassPointers) { // Take care not to kill klass - encode_klass_not_null(tmp1, klass, tmp2); - sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); + if (UseCompactObjectHeaders) { + ld(tmp1, Address(klass, Klass::prototype_header_offset())); + sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes())); } else { - sd(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + // This assumes that all prototype bits fitr in an int32_t + mv(tmp1, checked_cast<int32_t>(markWord::prototype().value())); + sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseCompressedClassPointers) { // Take care not to kill klass + encode_klass_not_null(tmp1, klass, tmp2); + sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes())); + } else { + sd(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } } if (len->is_valid()) { @@ -183,7 +187,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register // Clear gap/first 4 bytes following the length field. sw(zr, Address(obj, base_offset)); } - } else if (UseCompressedClassPointers) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { store_klass_gap(obj, zr); } } diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp index 0ffdcbca723..18657807bc6 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp @@ -3119,3 +3119,13 @@ void C2_MacroAssembler::extract_fp_v(FloatRegister dst, VectorRegister src, Basi vfmv_f_s(dst, tmp); } } + +void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Address src) { + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. Usually the address
+ assert(UseCompactObjectHeaders, "must"); + int offset = oopDesc::mark_offset_in_bytes() - oopDesc::klass_offset_in_bytes(); + ld(dst, Address(src.base(), src.offset() + offset)); + srli(dst, dst, markWord::klass_shift); +} diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp index 38351565cc6..3304e3aef72 100644 --- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp @@ -277,4 +277,6 @@ void extract_v(Register dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp); void extract_fp_v(FloatRegister dst, VectorRegister src, BasicType bt, int idx, VectorRegister tmp); + void load_narrow_klass_compact_c2(Register dst, Address src); + #endif // CPU_RISCV_C2_MACROASSEMBLER_RISCV_HPP diff --git a/src/hotspot/cpu/riscv/compressedKlass_riscv.cpp b/src/hotspot/cpu/riscv/compressedKlass_riscv.cpp index cffadb4189b..7c8d6b8f5bb 100644 --- a/src/hotspot/cpu/riscv/compressedKlass_riscv.cpp +++ b/src/hotspot/cpu/riscv/compressedKlass_riscv.cpp @@ -56,9 +56,9 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size result = reserve_address_space_for_zerobased_encoding(size, aslr); } - // Failing that, optimize for case (3) - a base with only bits set between [33-44) + // Failing that, optimize for case (3) - a base with only bits set between [32-44) if (result == nullptr) { - const uintptr_t from = nth_bit(32 + (optimize_for_zero_base ? LogKlassAlignmentInBytes : 0)); + const uintptr_t from = nth_bit(32); constexpr uintptr_t to = nth_bit(44); constexpr size_t alignment = nth_bit(32); result = reserve_address_space_X(from, to, size, alignment, aslr); diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp index 1e7a3f65e8e..2e69fbf831a 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp @@ -2503,20 +2503,19 @@ void MacroAssembler::orptr(Address adr, RegisterOrConstant src, Register tmp1, R sd(tmp1, adr); } -void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp1, Register tmp2, Label &L) { - assert_different_registers(oop, trial_klass, tmp1, tmp2); - if (UseCompressedClassPointers) { - lwu(tmp1, Address(oop, oopDesc::klass_offset_in_bytes())); - if (CompressedKlassPointers::base() == nullptr) { - slli(tmp1, tmp1, CompressedKlassPointers::shift()); - beq(trial_klass, tmp1, L); - return; - } - decode_klass_not_null(tmp1, tmp2); +void MacroAssembler::cmp_klass_compressed(Register oop, Register trial_klass, Register tmp, Label &L, bool equal) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp, oop); + } else if (UseCompressedClassPointers) { + lwu(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); } else { - ld(tmp1, Address(oop, oopDesc::klass_offset_in_bytes())); + ld(tmp, Address(oop, oopDesc::klass_offset_in_bytes())); + } + if (equal) { + beq(trial_klass, tmp, L); + } else { + bne(trial_klass, tmp, L); } - beq(trial_klass, tmp1, L); } // Move an oop into a register. 
@@ -2722,10 +2721,19 @@ void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { } } +void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); + ld(dst, Address(src, oopDesc::mark_offset_in_bytes())); + srli(dst, dst, markWord::klass_shift); +} + void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { assert_different_registers(dst, tmp); assert_different_registers(src, tmp); - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(dst, src); + decode_klass_not_null(dst, tmp); + } else if (UseCompressedClassPointers) { lwu(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst, tmp); } else { @@ -2736,6 +2744,7 @@ void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { // FIXME: Should this be a store release? concurrent gcs assumes // klass length is valid if klass field is not null. + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { encode_klass_not_null(src, tmp); sw(src, Address(dst, oopDesc::klass_offset_in_bytes())); @@ -2745,6 +2754,7 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { } void MacroAssembler::store_klass_gap(Register dst, Register src) { + assert(!UseCompactObjectHeaders, "not with compact headers"); if (UseCompressedClassPointers) { // Store to klass gap in destination sw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes())); @@ -2761,8 +2771,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register if (CompressedKlassPointers::base() == nullptr) { if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - slli(dst, src, LogKlassAlignmentInBytes); + slli(dst, src, CompressedKlassPointers::shift()); } else { mv(dst, src); } @@ -2778,9 +2787,9 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src, Register mv(xbase, (uintptr_t)CompressedKlassPointers::base()); if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - assert_different_registers(t0, xbase); - shadd(dst, src, xbase, t0, LogKlassAlignmentInBytes); + Register t = src == dst ? 
dst : t0; + assert_different_registers(t, xbase); + shadd(dst, src, xbase, t, CompressedKlassPointers::shift()); } else { add(dst, xbase, src); } @@ -2796,8 +2805,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register if (CompressedKlassPointers::base() == nullptr) { if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - srli(dst, src, LogKlassAlignmentInBytes); + srli(dst, src, CompressedKlassPointers::shift()); } else { mv(dst, src); } @@ -2819,8 +2827,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src, Register mv(xbase, (uintptr_t)CompressedKlassPointers::base()); sub(dst, src, xbase); if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - srli(dst, dst, LogKlassAlignmentInBytes); + srli(dst, dst, CompressedKlassPointers::shift()); } } @@ -4315,7 +4322,7 @@ address MacroAssembler::ic_call(address entry, jint method_index) { int MacroAssembler::ic_check_size() { // No compressed return (MacroAssembler::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) + - far_branch_size(); + far_branch_size() + (UseCompactObjectHeaders ? MacroAssembler::instruction_size * 1 : 0); } int MacroAssembler::ic_check(int end_alignment) { @@ -4335,7 +4342,10 @@ int MacroAssembler::ic_check(int end_alignment) { align(end_alignment, ic_check_size()); int uep_offset = offset(); - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(tmp1, receiver); + lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset())); + } else if (UseCompressedClassPointers) { lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes())); lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset())); } else { diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp index 8fca6357627..17b0956b3ef 100644 --- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp @@ -195,8 +195,9 @@ class MacroAssembler: public Assembler { void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, Register tmp1, Register tmp2, Register tmp3); void load_klass(Register dst, Register src, Register tmp = t0); + void load_narrow_klass_compact(Register dst, Register src); void store_klass(Register dst, Register src, Register tmp = t0); - void cmp_klass(Register oop, Register trial_klass, Register tmp1, Register tmp2, Label &L); + void cmp_klass_compressed(Register oop, Register trial_klass, Register tmp, Label &L, bool equal); void encode_klass_not_null(Register r, Register tmp = t0); void decode_klass_not_null(Register r, Register tmp = t0); diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index 6df41722d86..162fbcc802f 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -4817,6 +4817,7 @@ instruct loadKlass(iRegPNoSp dst, memory mem) // Load Narrow Klass Pointer instruct loadNKlass(iRegNNoSp dst, memory mem) %{ + predicate(!UseCompactObjectHeaders); match(Set dst (LoadNKlass mem)); ins_cost(LOAD_COST); @@ -4829,6 +4830,21 @@ instruct loadNKlass(iRegNNoSp dst, memory mem) ins_pipe(iload_reg_mem); %} +instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory mem) +%{ + predicate(UseCompactObjectHeaders); + match(Set dst (LoadNKlass mem)); + + ins_cost(LOAD_COST); + format %{ "lwu $dst, $mem\t# 
loadNKlass, compressed class ptr, #@loadNKlass" %} + + ins_encode %{ + __ load_narrow_klass_compact_c2(as_Register($dst$$reg), Address(as_Register($mem$$base), $mem$$disp)); + %} + + ins_pipe(iload_reg_mem); +%} + // Load Float instruct loadF(fRegF dst, memory mem) %{ diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index a204c9fdd45..52b33c62616 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -3546,12 +3546,22 @@ void TemplateTable::_new() { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. - __ sub(x13, x13, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ sub(x13, x13, oopDesc::base_offset_in_bytes()); + } else { + __ sub(x13, x13, sizeof(oopDesc)); + } __ beqz(x13, initialize_header); // Initialize object fields { - __ add(x12, x10, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ add(x12, x10, oopDesc::base_offset_in_bytes()); + } else { + __ add(x12, x10, sizeof(oopDesc)); + } Label loop; __ bind(loop); __ sd(zr, Address(x12)); @@ -3562,10 +3572,15 @@ void TemplateTable::_new() { // initialize object hader only. __ bind(initialize_header); - __ mv(t0, (intptr_t)markWord::prototype().value()); - __ sd(t0, Address(x10, oopDesc::mark_offset_in_bytes())); - __ store_klass_gap(x10, zr); // zero klass gap for compressed oops - __ store_klass(x10, x14); // store klass last + if (UseCompactObjectHeaders) { + __ ld(t0, Address(x14, Klass::prototype_header_offset())); + __ sd(t0, Address(x10, oopDesc::mark_offset_in_bytes())); + } else { + __ mv(t0, (intptr_t)markWord::prototype().value()); + __ sd(t0, Address(x10, oopDesc::mark_offset_in_bytes())); + __ store_klass_gap(x10, zr); // zero klass gap for compressed oops + __ store_klass(x10, x14); // store klass last + } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index d2e860aa320..213aa5efe1e 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -2047,8 +2047,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); - Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); // Length and pos's are all sign extended at this point on 64bit. @@ -2112,13 +2110,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible. if (basic_type != T_OBJECT) { // Simple test for basic type arrays. 
- if (UseCompressedClassPointers) { - __ z_l(tmp, src_klass_addr); - __ z_c(tmp, dst_klass_addr); - } else { - __ z_lg(tmp, src_klass_addr); - __ z_cg(tmp, dst_klass_addr); - } + __ cmp_klasses_from_objects(src, dst, tmp, Z_R1_scratch); __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); } else { // For object arrays, if src is a sub class of dst then we can @@ -2252,15 +2244,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { } if (basic_type != T_OBJECT) { - if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } - else { __ z_cg(tmp, dst_klass_addr); } + __ cmp_klass(tmp, dst, Z_R1_scratch); __ branch_optimized(Assembler::bcondNotEqual, halt); - if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); } - else { __ z_cg(tmp, src_klass_addr); } + + __ cmp_klass(tmp, src, Z_R1_scratch); __ branch_optimized(Assembler::bcondEqual, known_ok); } else { - if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } - else { __ z_cg(tmp, dst_klass_addr); } + __ cmp_klass(tmp, dst, Z_R1_scratch); __ branch_optimized(Assembler::bcondEqual, known_ok); __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok); } @@ -2755,12 +2745,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { add_debug_info_for_null_check_here(info); } - if (UseCompressedClassPointers) { - __ z_llgf(result, Address(obj, oopDesc::klass_offset_in_bytes())); - __ decode_klass_not_null(result); - } else { - __ z_lg(result, Address(obj, oopDesc::klass_offset_in_bytes())); - } + __ load_klass(result, obj); } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { ciMethod* method = op->profiled_method(); diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp index 41c2ae260a6..bc269f9353c 100644 --- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp @@ -177,17 +177,21 @@ void C1_MacroAssembler::try_allocate( void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) { assert_different_registers(obj, klass, len, t1, Rzero); - // This assumes that all prototype bits fit in an int32_t. - load_const_optimized(t1, (intx)markWord::prototype().value()); - z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseCompactObjectHeaders) { + z_lg(t1, Address(klass, in_bytes(Klass::prototype_header_offset()))); + z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + } else { + load_const_optimized(t1, (intx)markWord::prototype().value()); + z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes())); + store_klass(klass, obj, t1); + } if (len->is_valid()) { // Length will be in the klass gap, if one exists. z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes())); - } else if (UseCompressedClassPointers) { + } else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops. 
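Aside, not part of the patch: the allocation fast paths touched here (riscv C1 and TemplateTable above, s390 C1 in this hunk) all switch from "store prototype mark, clear klass gap, store klass" to a single 64-bit store of the Klass's prototype header, which already carries the narrow Klass ID (loaded via Klass::prototype_header_offset()). A stand-alone model with a simplified, assumed field layout:

#include <cstdint>

// Legacy header with compressed class pointers: mark + klass field + gap (16 bytes).
struct LegacyHeaderModel  { uint64_t mark; uint32_t narrow_klass; uint32_t gap; };
// Compact header: the mark word is the whole header (8 bytes).
struct CompactHeaderModel { uint64_t mark; };

void init_legacy(LegacyHeaderModel* h, uint64_t prototype_mark, uint32_t nk) {
  h->mark = prototype_mark;  // neutral mark word, no klass bits
  h->narrow_klass = nk;      // separate 32-bit klass field
  h->gap = 0;                // klass gap must be zeroed
}

void init_compact(CompactHeaderModel* h, uint64_t klass_prototype_header) {
  // One store: the per-Klass prototype header already contains the narrow
  // Klass ID in its upper bits.
  h->mark = klass_prototype_header;
}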
} - store_klass(klass, obj, t1); } void C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes, Register Rzero) { diff --git a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp index 025ef4c8915..378d5e4cfe1 100644 --- a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.cpp @@ -42,6 +42,13 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Regi compiler_fast_unlock_lightweight_object(obj, box, temp1, temp2); } +void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Address src) { + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. + z_lg(dst, src.plus_disp(-oopDesc::klass_offset_in_bytes())); + z_srlg(dst, dst, markWord::klass_shift); // TODO: could be z_sra +} + //------------------------------------------------------ // Special String Intrinsics. Implementation //------------------------------------------------------ diff --git a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp index aecb483f0a6..26c3c9a2bb5 100644 --- a/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/c2_MacroAssembler_s390.hpp @@ -33,6 +33,8 @@ void fast_lock_lightweight(Register obj, Register box, Register temp1, Register temp2); void fast_unlock_lightweight(Register obj, Register box, Register temp1, Register temp2); + void load_narrow_klass_compact_c2(Register dst, Address src); + //------------------------------------------- // Special String Intrinsics Implementation. //------------------------------------------- diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 84d09b9c1dc..ef7f3e22a04 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -2160,7 +2160,16 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) { } int MacroAssembler::ic_check_size() { - return 30 + (ImplicitNullChecks ? 0 : 6); + int ic_size = 24; + if (!ImplicitNullChecks) { + ic_size += 6; + } + if (UseCompactObjectHeaders) { + ic_size += 12; + } else { + ic_size += 6; // either z_llgf or z_lg + } + return ic_size; } int MacroAssembler::ic_check(int end_alignment) { @@ -2181,7 +2190,9 @@ int MacroAssembler::ic_check(int end_alignment) { z_cgij(R2_receiver, 0, Assembler::bcondEqual, failure); } - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(R1_scratch, R2_receiver); + } else if (UseCompressedClassPointers) { z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes())); } else { z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes())); @@ -3852,7 +3863,7 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { #ifdef ASSERT Label ok; - z_tmll(current, KlassAlignmentInBytes-1); // Check alignment. + z_tmll(current, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment. z_brc(Assembler::bcondAllZero, ok); // The plain disassembler does not recognize illtrap. It instead displays // a 32-bit value. Issuing two illtraps assures the disassembler finds @@ -3866,7 +3877,6 @@ void MacroAssembler::encode_klass_not_null(Register dst, Register src) { // We then can be sure we calculate an offset that fits into 32 bit. 
// More generally speaking: all subsequent calculations are purely 32-bit. if (shift != 0) { - assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); z_srlg(dst, current, shift); current = dst; } @@ -3996,7 +4006,7 @@ void MacroAssembler::decode_klass_not_null(Register dst) { #ifdef ASSERT Label ok; - z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. + z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment. z_brc(Assembler::bcondAllZero, ok); // The plain disassembler does not recognize illtrap. It instead displays // a 32-bit value. Issuing two illtraps assures the disassembler finds @@ -4043,7 +4053,7 @@ void MacroAssembler::decode_klass_not_null(Register dst, Register src) { #ifdef ASSERT Label ok; - z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. + z_tmll(dst, CompressedKlassPointers::klass_alignment_in_bytes() - 1); // Check alignment. z_brc(Assembler::bcondAllZero, ok); // The plain disassembler does not recognize illtrap. It instead displays // a 32-bit value. Issuing two illtraps assures the disassembler finds @@ -4065,10 +4075,58 @@ void MacroAssembler::load_klass(Register klass, Address mem) { } } +// Loads the obj's Klass* into dst. +// Input: +// src - the oop we want to load the klass from. +// dst - output nklass. +void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { + BLOCK_COMMENT("load_narrow_klass_compact {"); + assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); + z_lg(dst, Address(src, oopDesc::mark_offset_in_bytes())); + z_srlg(dst, dst, markWord::klass_shift); + BLOCK_COMMENT("} load_narrow_klass_compact"); +} + +void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { + BLOCK_COMMENT("cmp_klass {"); + assert_different_registers(obj, klass, tmp); + if (UseCompactObjectHeaders) { + assert(tmp != noreg, "required"); + assert_different_registers(klass, obj, tmp); + load_narrow_klass_compact(tmp, obj); + z_cr(klass, tmp); + } else if (UseCompressedClassPointers) { + z_c(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } else { + z_cg(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } + BLOCK_COMMENT("} cmp_klass"); +} + +void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { + BLOCK_COMMENT("cmp_klasses_from_objects {"); + if (UseCompactObjectHeaders) { + assert(tmp1 != noreg && tmp2 != noreg, "required"); + assert_different_registers(obj1, obj2, tmp1, tmp2); + load_narrow_klass_compact(tmp1, obj1); + load_narrow_klass_compact(tmp2, obj2); + z_cr(tmp1, tmp2); + } else if (UseCompressedClassPointers) { + z_l(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + z_c(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); + } else { + z_lg(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + z_cg(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); + } + BLOCK_COMMENT("} cmp_klasses_from_objects"); +} + void MacroAssembler::load_klass(Register klass, Register src_oop) { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(klass, src_oop); + decode_klass_not_null(klass); + } else if (UseCompressedClassPointers) { z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop); - // Attention: no null check here! 
decode_klass_not_null(klass); } else { z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); @@ -4076,6 +4134,7 @@ void MacroAssembler::load_klass(Register klass, Register src_oop) { } void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { + assert(!UseCompactObjectHeaders, "Don't use with compact headers"); if (UseCompressedClassPointers) { assert_different_registers(dst_oop, klass, Z_R0); if (ck == noreg) ck = klass; @@ -4087,6 +4146,7 @@ void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) } void MacroAssembler::store_klass_gap(Register s, Register d) { + assert(!UseCompactObjectHeaders, "Don't use with compact headers"); if (UseCompressedClassPointers) { assert(s != d, "not enough registers"); // Support s = noreg. @@ -4112,7 +4172,11 @@ void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rba const int shift = CompressedKlassPointers::shift(); address base = CompressedKlassPointers::base(); - assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); + if (UseCompactObjectHeaders) { + assert(shift >= 3, "cKlass encoder detected bad shift"); + } else { + assert((shift == 0) || (shift == 3), "cKlass encoder detected bad shift"); + } assert_different_registers(Rop1, Z_R0); assert_different_registers(Rop1, Rbase, Z_R1); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp index 061817a1289..7806fef3ce8 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp @@ -803,6 +803,13 @@ class MacroAssembler: public Assembler { void load_klass(Register klass, Register src_oop); void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided. void store_klass_gap(Register s, Register dst_oop); + void load_narrow_klass_compact(Register dst, Register src); + // Compares the Klass pointer of an object to a given Klass (which might be narrow, + // depending on UseCompressedClassPointers). + void cmp_klass(Register klass, Register obj, Register tmp); + // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags. + // Uses tmp1 and tmp2 as temporary registers. 
+ void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2); // This function calculates the size of the code generated by // decode_klass_not_null(register dst) diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad index 561ffc9993c..63e150c9e9c 100644 --- a/src/hotspot/cpu/s390/s390.ad +++ b/src/hotspot/cpu/s390/s390.ad @@ -4410,6 +4410,7 @@ instruct loadN(iRegN dst, memory mem) %{ // Load narrow Klass Pointer instruct loadNKlass(iRegN dst, memory mem) %{ + predicate(!UseCompactObjectHeaders); match(Set dst (LoadNKlass mem)); ins_cost(MEMORY_REF_COST); size(Z_DISP3_SIZE); @@ -4419,6 +4420,21 @@ instruct loadNKlass(iRegN dst, memory mem) %{ ins_pipe(pipe_class_dummy); %} +instruct loadNKlassCompactHeaders(iRegN dst, memory mem, flagsReg cr) %{ + match(Set dst (LoadNKlass mem)); + predicate(UseCompactObjectHeaders); + effect(KILL cr); + ins_cost(MEMORY_REF_COST); + format %{ "load_narrow_klass_compact $dst,$mem \t# compressed class ptr" %} + // TODO: size() + ins_encode %{ + __ block_comment("load_narrow_klass_compact_c2 {"); + __ load_narrow_klass_compact_c2($dst$$Register, $mem$$Address); + __ block_comment("} load_narrow_klass_compact"); + %} + ins_pipe(pipe_class_dummy); +%} + // Load constant Compressed Pointer instruct loadConN(iRegN dst, immN src) %{ diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp index 0c9f9e031b0..3cb1aba810d 100644 --- a/src/hotspot/cpu/s390/templateTable_s390.cpp +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp @@ -3952,7 +3952,12 @@ void TemplateTable::_new() { if (!ZeroTLAB) { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. - __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC. + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ z_aghi(Rsize, (int)-oopDesc::base_offset_in_bytes()); + } else { + __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC. + } __ z_bre(initialize_header); // Jump if size of fields is zero. // Initialize object fields. @@ -3964,17 +3969,25 @@ void TemplateTable::_new() { // Set Rzero to 0 and use it as src length, then mvcle will copy nothing // and fill the object with the padding value 0. - __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject); + if (UseCompactObjectHeaders) { + __ add2reg(RobjectFields, oopDesc::base_offset_in_bytes(), RallocatedObject); + } else { + __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject); + } __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0); } // Initialize object header only. __ bind(initialize_header); - __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()), - (long)markWord::prototype().value()); - - __ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops. - __ store_klass(iklass, RallocatedObject); // Store klass last. + if (UseCompactObjectHeaders) { + __ z_lg(tmp, Address(iklass, in_bytes(Klass::prototype_header_offset()))); + __ z_stg(tmp, Address(RallocatedObject, oopDesc::mark_offset_in_bytes())); + } else { + __ store_const(Address(RallocatedObject, oopDesc::mark_offset_in_bytes()), + (long) markWord::prototype().value()); + __ store_klass_gap(Rzero, RallocatedObject); // Zero klass gap for compressed oops. + __ store_klass(iklass, RallocatedObject); // Store klass last. 
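Aside, not part of the patch: the _new fast paths subtract oopDesc::base_offset_in_bytes() instead of sizeof(oopDesc) under compact headers because instance fields start right after the 8-byte mark word. A stand-alone sketch of the size bookkeeping; the 16/8 byte values are illustrative of a 64-bit VM with compressed class pointers versus compact headers.

#include <cstddef>

// Number of bytes the fast path must zero after the header of a new instance.
size_t payload_bytes(size_t object_size_bytes, bool compact_headers) {
  // Legacy: 8-byte mark + 4-byte narrow klass + 4-byte gap = 16 bytes.
  // Compact headers: 8-byte mark word only; fields follow immediately.
  size_t header_bytes = compact_headers ? 8 : 16;
  return object_size_bytes - header_bytes; // zero means: initialize header only
}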
+ } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath. diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index 64265a96909..ff6d18e48e1 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -3046,6 +3046,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Register length = op->length()->as_register(); Register tmp = op->tmp()->as_register(); Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg); + Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg; CodeStub* stub = op->stub(); int flags = op->flags(); @@ -3170,8 +3171,6 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); - Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); - Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); // length and pos's are all sign extended at this point on 64bit @@ -3237,13 +3236,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { // We don't know the array types are compatible if (basic_type != T_OBJECT) { // Simple test for basic type arrays - if (UseCompressedClassPointers) { - __ movl(tmp, src_klass_addr); - __ cmpl(tmp, dst_klass_addr); - } else { - __ movptr(tmp, src_klass_addr); - __ cmpptr(tmp, dst_klass_addr); - } + __ cmp_klasses_from_objects(src, dst, tmp, tmp2); __ jcc(Assembler::notEqual, *stub->entry()); } else { // For object arrays, if src is a sub class of dst then we can @@ -3302,6 +3295,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { store_parameter(src, 4); #ifndef _LP64 + Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); __ movptr(tmp, dst_klass_addr); __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); __ push(tmp); @@ -3405,16 +3399,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { #endif if (basic_type != T_OBJECT) { - - if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); - else __ cmpptr(tmp, dst_klass_addr); + __ cmp_klass(tmp, dst, tmp2); __ jcc(Assembler::notEqual, halt); - if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); - else __ cmpptr(tmp, src_klass_addr); + __ cmp_klass(tmp, src, tmp2); __ jcc(Assembler::equal, known_ok); } else { - if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); - else __ cmpptr(tmp, dst_klass_addr); + __ cmp_klass(tmp, dst, tmp2); __ jcc(Assembler::equal, known_ok); __ cmpptr(src, dst); __ jcc(Assembler::equal, known_ok); @@ -3511,13 +3501,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) { add_debug_info_for_null_check_here(info); } -#ifdef _LP64 - if (UseCompressedClassPointers) { - __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes())); - __ decode_klass_not_null(result, rscratch1); - } else -#endif - __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes())); + __ load_klass(result, obj, rscratch1); } void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index bf5b90db5fc..a0efde9b816 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -170,16 +170,20 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i void C1_MacroAssembler::initialize_header(Register 
obj, Register klass, Register len, Register t1, Register t2) { - assert_different_registers(obj, klass, len); - movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value())); + assert_different_registers(obj, klass, len, t1, t2); #ifdef _LP64 - if (UseCompressedClassPointers) { // Take care not to kill klass + if (UseCompactObjectHeaders) { + movptr(t1, Address(klass, Klass::prototype_header_offset())); + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1); + } else if (UseCompressedClassPointers) { // Take care not to kill klass + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value())); movptr(t1, klass); encode_klass_not_null(t1, rscratch1); movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); } else #endif { + movptr(Address(obj, oopDesc::mark_offset_in_bytes()), checked_cast<int32_t>(markWord::prototype().value())); movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); } @@ -196,7 +200,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register #endif } #ifdef _LP64 - else if (UseCompressedClassPointers) { + else if (UseCompressedClassPointers && !UseCompactObjectHeaders) { xorptr(t1, t1); store_klass_gap(obj, t1); } @@ -230,7 +234,9 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "con_size_in_bytes is not multiple of alignment"); const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize; - + if (UseCompactObjectHeaders) { + assert(hdr_size_in_bytes == 8, "check object headers size"); + } initialize_header(obj, klass, noreg, t1, t2); if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) { diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp index 3a8f1ab839d..13200252bef 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp @@ -7093,3 +7093,13 @@ void C2_MacroAssembler::vector_saturating_op(int ideal_opc, BasicType elem_bt, X vector_saturating_op(ideal_opc, elem_bt, dst, src1, src2, vlen_enc); } } + +#ifdef _LP64 +void C2_MacroAssembler::load_narrow_klass_compact_c2(Register dst, Address src) { + // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract + // obj-start, so that we can load from the object's mark-word instead. Usually the address + // comes as obj-start in obj and klass_offset_in_bytes in disp.
+ movq(dst, src.plus_disp(-oopDesc::klass_offset_in_bytes())); + shrq(dst, markWord::klass_shift); +} +#endif diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp index 3a36fd75e3f..523200486cc 100644 --- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp @@ -583,4 +583,8 @@ public: void select_from_two_vectors_evex(BasicType elem_bt, XMMRegister dst, XMMRegister src1, XMMRegister src2, int vlen_enc); +#ifdef _LP64 + void load_narrow_klass_compact_c2(Register dst, Address src); +#endif + #endif // CPU_X86_C2_MACROASSEMBLER_X86_HPP diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp index 34f8bec8d11..2837a85800f 100644 --- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp +++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "macroAssembler_x86.hpp" #include "stubGenerator_x86_64.hpp" +#include "oops/arrayOop.hpp" #include "opto/c2_MacroAssembler.hpp" #include "opto/intrinsicnode.hpp" @@ -160,6 +161,9 @@ static void highly_optimized_short_cases(StrIntrinsicNode::ArgEncoding ae, Regis Register needle_len, XMMRegister XMM0, XMMRegister XMM1, Register mask, Register tmp, MacroAssembler *_masm); +static void copy_to_stack(Register haystack, Register haystack_len, bool isU, Register tmp, + XMMRegister xtmp, MacroAssembler *_masm); + static void setup_jump_tables(StrIntrinsicNode::ArgEncoding ae, Label &L_error, Label &L_checkRange, Label &L_fixup, address *big_jump_table, address *small_jump_table, MacroAssembler *_masm); @@ -395,41 +399,21 @@ static void generate_string_indexof_stubs(StubGenerator *stubgen, address *fnptr // Do "big switch" if haystack size > 32 __ cmpq(haystack_len, 0x20); - __ ja_b(L_bigSwitchTop); + __ ja(L_bigSwitchTop); // Copy the small (< 32 byte) haystack to the stack. Allows for vector reads without page fault // Only done for small haystacks // // NOTE: This code assumes that the haystack points to a java array type AND there are - // at least 16 bytes of header preceeding the haystack pointer. + // at least 8 bytes of header preceeding the haystack pointer. // - // This means that we're copying up to 15 bytes of the header onto the stack along + // This means that we're copying up to 7 bytes of the header onto the stack along // with the haystack bytes. After the copy completes, we adjust the haystack pointer // to the valid haystack bytes on the stack. 
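Aside, not part of the patch: the copy described in the comment above (and implemented by the new copy_to_stack helper further down) can be modeled as follows. With compact headers only 8 bytes of header are guaranteed to precede the array payload, so the stub copies in blocks that end exactly at the end of the array and never reach more than 7 bytes before its first element. Names, the byte-wise length, and the buffer size are illustrative; the real helper works in array elements and uses vector loads/stores.

#include <cstdint>
#include <cstring>

// Model of the windowed haystack copy; len_bytes is assumed to be 1..32.
// Returns a pointer to the first valid haystack byte inside buf.
const uint8_t* copy_small_haystack(const uint8_t* haystack, size_t len_bytes,
                                   uint8_t buf[64]) {
  size_t chunk = (len_bytes <= 8)  ? 8
               : (len_bytes <= 16) ? 16
               : (len_bytes <= 24) ? 24 : 32;
  // The source window ends at haystack + len_bytes, so it starts at most
  // 7 bytes before the array body -- still inside the mapped object header.
  std::memcpy(buf, haystack + len_bytes - chunk, chunk);
  // The valid bytes are the last len_bytes of the copied chunk.
  return buf + (chunk - len_bytes);
}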
{ - Label L_moreThan16, L_adjustHaystack; - - const Register index = rax; + const Register tmp = rax; const Register haystack = rbx; - - // Only a single vector load/store of either 16 or 32 bytes - __ cmpq(haystack_len, 0x10); - __ ja_b(L_moreThan16); - - __ movq(index, COPIED_HAYSTACK_STACK_OFFSET + 0x10); - __ movdqu(XMM_TMP1, Address(haystack, haystack_len, Address::times_1, -0x10)); - __ movdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), XMM_TMP1); - __ jmpb(L_adjustHaystack); - - __ bind(L_moreThan16); - __ movq(index, COPIED_HAYSTACK_STACK_OFFSET + 0x20); - __ vmovdqu(XMM_TMP1, Address(haystack, haystack_len, Address::times_1, -0x20)); - __ vmovdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), XMM_TMP1); - - // Point the haystack at the correct location of the first byte of the "real" haystack on the stack - __ bind(L_adjustHaystack); - __ subq(index, haystack_len); - __ leaq(haystack, Address(rsp, index, Address::times_1)); + copy_to_stack(haystack, haystack_len, false, tmp, XMM_TMP1, _masm); } // Dispatch to handlers for small needle and small haystack @@ -760,39 +744,39 @@ static void generate_string_indexof_stubs(StubGenerator *stubgen, address *fnptr __ ja(L_wideNoExpand); // - // Reads of existing needle are 16-byte chunks - // Writes to copied needle are 32-byte chunks + // Reads of existing needle are 8-byte chunks + // Writes to copied needle are 16-byte chunks // Don't read past the end of the existing needle // - // Start first read at [((ndlLen % 16) - 16) & 0xf] - // outndx += 32 - // inndx += 16 + // Start first read at [((ndlLen % 8) - 8) & 0x7] + // outndx += 16 + // inndx += 8 // cmp nndx, ndlLen // jae done // - // Final index of start of needle at ((16 - (ndlLen %16)) & 0xf) << 1 + // Final index of start of needle at ((8 - (ndlLen % 8)) & 0x7) << 1 // - // Starting read for needle at -(16 - (nLen % 16)) - // Offset of needle in stack should be (16 - (nLen % 16)) * 2 + // Starting read for needle at -(8 - (nLen % 8)) + // Offset of needle in stack should be (8 - (nLen % 8)) * 2 __ movq(index, needle_len); - __ andq(index, 0xf); // nLen % 16 - __ movq(offset, 0x10); - __ subq(offset, index); // 16 - (nLen % 16) + __ andq(index, 0x7); // nLen % 8 + __ movq(offset, 0x8); + __ subq(offset, index); // 8 - (nLen % 8) __ movq(index, offset); __ shlq(offset, 1); // * 2 - __ negq(index); // -(16 - (nLen % 16)) + __ negq(index); // -(8 - (nLen % 8)) __ xorq(wr_index, wr_index); __ bind(L_top); // load needle and expand - __ vpmovzxbw(xmm0, Address(needle, index, Address::times_1), Assembler::AVX_256bit); + __ vpmovzxbw(xmm0, Address(needle, index, Address::times_1), Assembler::AVX_128bit); // store expanded needle to stack - __ vmovdqu(Address(rsp, wr_index, Address::times_1, EXPANDED_NEEDLE_STACK_OFFSET), xmm0); - __ addq(index, 0x10); + __ movdqu(Address(rsp, wr_index, Address::times_1, EXPANDED_NEEDLE_STACK_OFFSET), xmm0); + __ addq(index, 0x8); __ cmpq(index, needle_len); __ jae(L_finished); - __ addq(wr_index, 32); + __ addq(wr_index, 16); __ jmpb(L_top); // adjust pointer and length of needle @@ -1582,35 +1566,9 @@ static void highly_optimized_short_cases(StrIntrinsicNode::ArgEncoding ae, Regis assert((COPIED_HAYSTACK_STACK_OFFSET == 0), "Must be zero!"); assert((COPIED_HAYSTACK_STACK_SIZE == 64), "Must be 64!"); - // Copy incoming haystack onto stack - { - Label L_adjustHaystack, L_moreThan16; - - // Copy haystack to stack (haystack <= 32 bytes) - __ subptr(rsp, COPIED_HAYSTACK_STACK_SIZE); - __ cmpq(haystack_len, isU ? 
0x8 : 0x10); - __ ja_b(L_moreThan16); - - __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 0x10); - __ movdqu(XMM0, Address(haystack, haystack_len, isU ? Address::times_2 : Address::times_1, -0x10)); - __ movdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), XMM0); - __ jmpb(L_adjustHaystack); - - __ bind(L_moreThan16); - __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 0x20); - __ vmovdqu(XMM0, Address(haystack, haystack_len, isU ? Address::times_2 : Address::times_1, -0x20)); - __ vmovdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), XMM0); - - __ bind(L_adjustHaystack); - __ subptr(tmp, haystack_len); - - if (isU) { - // For UTF-16, lengths are half - __ subptr(tmp, haystack_len); - } - // Point the haystack to the stack - __ leaq(haystack, Address(rsp, tmp, Address::times_1)); - } + // Copy incoming haystack onto stack (haystack <= 32 bytes) + __ subptr(rsp, COPIED_HAYSTACK_STACK_SIZE); + copy_to_stack(haystack, haystack_len, isU, tmp, XMM0, _masm); // Creates a mask of (n - k + 1) ones. This prevents recognizing any false-positives // past the end of the valid haystack. @@ -1672,6 +1630,86 @@ static void highly_optimized_short_cases(StrIntrinsicNode::ArgEncoding ae, Regis __ jmpb(L_out); } + + +// Copy the small (<= 32 byte) haystack to the stack. Allows for vector reads without page fault +// Only done for small haystacks +// NOTE: This code assumes that the haystack points to a java array type AND there are +// at least 8 bytes of header preceeding the haystack pointer. +// We're copying up to 7 bytes of the header onto the stack along with the haystack bytes. +// After the copy completes, we adjust the haystack pointer +// to the valid haystack bytes on the stack. +// +// Copy haystack array elements to stack at region +// (COPIED_HAYSTACK_STACK_OFFSET - COPIED_HAYSTACK_STACK_OFFSET+63) with the following conditions: +// It may copy up to 7 bytes that precede the array +// It doesn't read beyond the end of the array +// There are atleast 31 bytes of stack region beyond the end of array +// Inputs: +// haystack - Address of haystack +// haystack_len - Number of elements in haystack +// isU - Boolean indicating if each element is Latin1 or UTF16 +// tmp, xtmp - Scratch registers +// Output: +// haystack - Address of copied string on stack + +static void copy_to_stack(Register haystack, Register haystack_len, bool isU, + Register tmp, XMMRegister xtmp, MacroAssembler *_masm) { + Label L_moreThan8, L_moreThan16, L_moreThan24, L_adjustHaystack; + + assert(arrayOopDesc::base_offset_in_bytes(isU ? T_CHAR : T_BYTE) >= 8, + "Needs at least 8 bytes preceding the array body"); + + // Copy haystack to stack (haystack <= 32 bytes) + int scale = isU ? 2 : 1; // bytes per char + Address::ScaleFactor addrScale = isU ? 
Address::times_2 : Address::times_1; + + __ cmpq(haystack_len, 16/scale); + __ ja_b(L_moreThan16); + + __ cmpq(haystack_len, 8/scale); + __ ja_b(L_moreThan8); + // haystack length <= 8 bytes, copy 8 bytes upto haystack end reading at most 7 bytes into the header + __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 8); + __ movq(xtmp, Address(haystack, haystack_len, addrScale, -8)); + __ movq(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), xtmp); + __ jmpb(L_adjustHaystack); + + __ bind(L_moreThan8); + // haystack length > 8 and <=16 bytes, copy 16 bytes upto haystack end reading at most 7 bytes into the header + __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 16); + __ movdqu(xtmp, Address(haystack, haystack_len, addrScale, -16)); + __ movdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), xtmp); + __ jmpb(L_adjustHaystack); + + __ bind(L_moreThan16); + __ cmpq(haystack_len, 24/scale); + __ ja_b(L_moreThan24); + // haystack length > 16 and <=24 bytes, copy 24 bytes upto haystack end reading at most 7 bytes into the header + __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 24); + __ movdqu(xtmp, Address(haystack, haystack_len, addrScale, -24)); + __ movdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), xtmp); + __ movq(xtmp, Address(haystack, haystack_len, addrScale, -8)); + __ movq(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET + 16), xtmp); + __ jmpb(L_adjustHaystack); + + __ bind(L_moreThan24); + // haystack length > 24 and < 32 bytes, copy 32 bytes upto haystack end reading at most 7 bytes into the header + __ movq(tmp, COPIED_HAYSTACK_STACK_OFFSET + 32); + __ vmovdqu(xtmp, Address(haystack, haystack_len, addrScale, -32)); + __ vmovdqu(Address(rsp, COPIED_HAYSTACK_STACK_OFFSET), xtmp); + + __ bind(L_adjustHaystack); + __ subptr(tmp, haystack_len); + + if (isU) { + __ subptr(tmp, haystack_len); + } + + // Point the haystack to the stack + __ leaq(haystack, Address(rsp, tmp, Address::times_1)); +} + //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index d6c6735abc3..344759bd0a8 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -1350,7 +1350,8 @@ void MacroAssembler::ic_call(address entry, jint method_index) { } int MacroAssembler::ic_check_size() { - return LP64_ONLY(14) NOT_LP64(12); + return + LP64_ONLY(UseCompactObjectHeaders ? 
17 : 14) NOT_LP64(12); } int MacroAssembler::ic_check(int end_alignment) { @@ -1366,6 +1367,12 @@ int MacroAssembler::ic_check(int end_alignment) { int uep_offset = offset(); +#ifdef _LP64 + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(temp, receiver); + cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); + } else +#endif if (UseCompressedClassPointers) { movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); @@ -1376,7 +1383,7 @@ int MacroAssembler::ic_check(int end_alignment) { // if inline cache check fails, then jump to runtime routine jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); - assert((offset() % end_alignment) == 0, "Misaligned verified entry point"); + assert((offset() % end_alignment) == 0, "Misaligned verified entry point (%d, %d, %d)", uep_offset, offset(), end_alignment); return uep_offset; } @@ -5948,19 +5955,33 @@ void MacroAssembler::load_method_holder(Register holder, Register method) { movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* } +#ifdef _LP64 +void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { + assert(UseCompactObjectHeaders, "expect compact object headers"); + movq(dst, Address(src, oopDesc::mark_offset_in_bytes())); + shrq(dst, markWord::klass_shift); +} +#endif + void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { assert_different_registers(src, tmp); assert_different_registers(dst, tmp); #ifdef _LP64 - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + load_narrow_klass_compact(dst, src); + decode_klass_not_null(dst, tmp); + } else if (UseCompressedClassPointers) { movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); decode_klass_not_null(dst, tmp); } else #endif + { movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); + } } void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { + assert(!UseCompactObjectHeaders, "not with compact headers"); assert_different_registers(src, tmp); assert_different_registers(dst, tmp); #ifdef _LP64 @@ -5972,6 +5993,41 @@ void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); } +void MacroAssembler::cmp_klass(Register klass, Register obj, Register tmp) { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + assert(tmp != noreg, "need tmp"); + assert_different_registers(klass, obj, tmp); + load_narrow_klass_compact(tmp, obj); + cmpl(klass, tmp); + } else if (UseCompressedClassPointers) { + cmpl(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } else +#endif + { + cmpptr(klass, Address(obj, oopDesc::klass_offset_in_bytes())); + } +} + +void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + assert(tmp2 != noreg, "need tmp2"); + assert_different_registers(obj1, obj2, tmp1, tmp2); + load_narrow_klass_compact(tmp1, obj1); + load_narrow_klass_compact(tmp2, obj2); + cmpl(tmp1, tmp2); + } else if (UseCompressedClassPointers) { + movl(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + cmpl(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); + } else +#endif + { + movptr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes())); + cmpptr(tmp1, Address(obj2, oopDesc::klass_offset_in_bytes())); + } +} + void MacroAssembler::access_load_at(BasicType type, 
DecoratorSet decorators, Register dst, Address src, Register tmp1, Register thread_tmp) { BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); @@ -6019,6 +6075,7 @@ void MacroAssembler::store_heap_oop_null(Address dst) { #ifdef _LP64 void MacroAssembler::store_klass_gap(Register dst, Register src) { + assert(!UseCompactObjectHeaders, "Don't use with compact headers"); if (UseCompressedClassPointers) { // Store to klass gap in destination movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); @@ -6183,8 +6240,7 @@ void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { subq(r, tmp); } if (CompressedKlassPointers::shift() != 0) { - assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - shrq(r, LogKlassAlignmentInBytes); + shrq(r, CompressedKlassPointers::shift()); } } @@ -6197,8 +6253,7 @@ void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) movptr(dst, src); } if (CompressedKlassPointers::shift() != 0) { - assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - shrq(dst, LogKlassAlignmentInBytes); + shrq(dst, CompressedKlassPointers::shift()); } } @@ -6210,8 +6265,7 @@ void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - shlq(r, LogKlassAlignmentInBytes); + shlq(r, CompressedKlassPointers::shift()); } if (CompressedKlassPointers::base() != nullptr) { mov64(tmp, (int64_t)CompressedKlassPointers::base()); @@ -6233,17 +6287,28 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) // a pointer that needs nothing but a register rename. 
movl(dst, src); } else { - if (CompressedKlassPointers::base() != nullptr) { - mov64(dst, (int64_t)CompressedKlassPointers::base()); - } else { - xorq(dst, dst); - } - if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); - leaq(dst, Address(dst, src, Address::times_8, 0)); + if (CompressedKlassPointers::shift() <= Address::times_8) { + if (CompressedKlassPointers::base() != nullptr) { + mov64(dst, (int64_t)CompressedKlassPointers::base()); + } else { + xorq(dst, dst); + } + if (CompressedKlassPointers::shift() != 0) { + assert(CompressedKlassPointers::shift() == Address::times_8, "klass not aligned on 64bits?"); + leaq(dst, Address(dst, src, Address::times_8, 0)); + } else { + addq(dst, src); + } } else { + if (CompressedKlassPointers::base() != nullptr) { + const uint64_t base_right_shifted = + (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); + mov64(dst, base_right_shifted); + } else { + xorq(dst, dst); + } addq(dst, src); + shlq(dst, CompressedKlassPointers::shift()); } } } diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp index 618ec87da86..42c2d6b64cd 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp @@ -363,9 +363,20 @@ class MacroAssembler: public Assembler { void load_method_holder(Register holder, Register method); // oop manipulations +#ifdef _LP64 + void load_narrow_klass_compact(Register dst, Register src); +#endif void load_klass(Register dst, Register src, Register tmp); void store_klass(Register dst, Register src, Register tmp); + // Compares the Klass pointer of an object to a given Klass (which might be narrow, + // depending on UseCompressedClassPointers). + void cmp_klass(Register klass, Register obj, Register tmp); + + // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags. + // Uses tmp1 and tmp2 as temporary registers. + void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2); + void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, Register tmp1, Register thread_tmp); void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, diff --git a/src/hotspot/cpu/x86/matcher_x86.hpp b/src/hotspot/cpu/x86/matcher_x86.hpp index 192e959451f..3d8b0ed092f 100644 --- a/src/hotspot/cpu/x86/matcher_x86.hpp +++ b/src/hotspot/cpu/x86/matcher_x86.hpp @@ -93,7 +93,7 @@ static bool narrow_klass_use_complex_address() { NOT_LP64(ShouldNotCallThis();) assert(UseCompressedClassPointers, "only for compressed klass code"); - return (LogKlassAlignmentInBytes <= 3); + return (CompressedKlassPointers::shift() <= 3); } // Prefer ConN+DecodeN over ConP. 
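Aside, not part of the patch: the new else-branch in decode_and_move_klass_not_null above handles narrow-Klass shifts larger than what an lea scale can express (Address::times_8). It relies on the identity ((base >> shift) + nk) << shift == base + (nk << shift), which holds because the encoding base is aligned to 1 << shift. A stand-alone check of that identity:

#include <cassert>
#include <cstdint>

// Decode a narrow Klass ID when the shift exceeds lea's maximum scale factor.
uint64_t decode_with_large_shift(uint64_t base, uint32_t nk, unsigned shift) {
  assert((base & ((uint64_t{1} << shift) - 1)) == 0 && "base must be 1<<shift aligned");
  uint64_t base_right_shifted = base >> shift; // mov64(dst, base_right_shifted)
  return (base_right_shifted + nk) << shift;   // addq(dst, src); shlq(dst, shift)
  // == base + (uint64_t{nk} << shift), i.e. the usual decode.
}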
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp index b5ed3719897..f866423d123 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp @@ -37,7 +37,7 @@ enum platform_dependent_constants { _continuation_stubs_code_size = 1000 LP64_ONLY(+1000), // AVX512 intrinsics add more code in 64-bit VM, // Windows have more code to save/restore registers - _compiler_stubs_code_size = 20000 LP64_ONLY(+46000) WINDOWS_ONLY(+2000), + _compiler_stubs_code_size = 20000 LP64_ONLY(+47000) WINDOWS_ONLY(+2000), _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000) ZGC_ONLY(+20000) }; diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index 527d961259e..441e4c8a0b8 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -4084,7 +4084,12 @@ void TemplateTable::_new() { // The object is initialized before the header. If the object size is // zero, go directly to the header initialization. - __ decrement(rdx, sizeof(oopDesc)); + if (UseCompactObjectHeaders) { + assert(is_aligned(oopDesc::base_offset_in_bytes(), BytesPerLong), "oop base offset must be 8-byte-aligned"); + __ decrement(rdx, oopDesc::base_offset_in_bytes()); + } else { + __ decrement(rdx, sizeof(oopDesc)); + } __ jcc(Assembler::zero, initialize_header); // Initialize topmost object field, divide rdx by 8, check if odd and @@ -4106,22 +4111,30 @@ void TemplateTable::_new() { // initialize remaining object fields: rdx was a multiple of 8 { Label loop; __ bind(loop); - __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx); - NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx)); + int header_size_bytes = oopDesc::header_size() * HeapWordSize; + assert(is_aligned(header_size_bytes, BytesPerLong), "oop header size must be 8-byte-aligned"); + __ movptr(Address(rax, rdx, Address::times_8, header_size_bytes - 1*oopSize), rcx); + NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, header_size_bytes - 2*oopSize), rcx)); __ decrement(rdx); __ jcc(Assembler::notZero, loop); } // initialize object header only. __ bind(initialize_header); - __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), - (intptr_t)markWord::prototype().value()); // header - __ pop(rcx); // get saved klass back in the register. + if (UseCompactObjectHeaders) { + __ pop(rcx); // get saved klass back in the register. + __ movptr(rbx, Address(rcx, Klass::prototype_header_offset())); + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rbx); + } else { + __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), + (intptr_t)markWord::prototype().value()); // header + __ pop(rcx); // get saved klass back in the register. 
#ifdef _LP64 - __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) - __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops + __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code) + __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops #endif - __ store_klass(rax, rcx, rscratch1); // klass + __ store_klass(rax, rcx, rscratch1); // klass + } if (DTraceAllocProbes) { // Trigger dtrace event for fastpath diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index b4eb4c313d3..fc083ecfa24 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -4351,6 +4351,7 @@ instruct loadKlass(rRegP dst, memory mem) // Load narrow Klass Pointer instruct loadNKlass(rRegN dst, memory mem) %{ + predicate(!UseCompactObjectHeaders); match(Set dst (LoadNKlass mem)); ins_cost(125); // XXX @@ -4361,6 +4362,19 @@ instruct loadNKlass(rRegN dst, memory mem) ins_pipe(ialu_reg_mem); // XXX %} +instruct loadNKlassCompactHeaders(rRegN dst, memory mem, rFlagsReg cr) +%{ + predicate(UseCompactObjectHeaders); + match(Set dst (LoadNKlass mem)); + effect(KILL cr); + ins_cost(125); // XXX + format %{ "load_narrow_klass_compact $dst, $mem\t# compressed klass ptr" %} + ins_encode %{ + __ load_narrow_klass_compact_c2($dst$$Register, $mem$$Address); + %} + ins_pipe(pipe_slow); // XXX +%} + // Load Float instruct loadF(regF dst, memory mem) %{ @@ -11665,6 +11679,7 @@ instruct compN_rReg_imm_klass(rFlagsRegU cr, rRegN op1, immNKlass op2) %{ instruct compN_mem_imm_klass(rFlagsRegU cr, memory mem, immNKlass src) %{ + predicate(!UseCompactObjectHeaders); match(Set cr (CmpN src (LoadNKlass mem))); format %{ "cmpl $mem, $src\t# compressed klass ptr" %} diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index d68ce10d1c1..a181257a519 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -227,8 +227,10 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re if (!is_excluded(klass)) { _klasses->append(klass); } - // See RunTimeClassInfo::get_for() - _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment); + // See RunTimeClassInfo::get_for(): make sure we have enough space for both maximum + // Klass alignment as well as the RuntimeInfo* pointer we will embed in front of a Klass. + _estimated_metaspaceobj_bytes += align_up(BytesPerWord, CompressedKlassPointers::klass_alignment_in_bytes()) + + align_up(sizeof(void*), SharedSpaceObjectAlignment); } else if (ref->msotype() == MetaspaceObj::SymbolType) { // Make sure the symbol won't be GC'ed while we are dumping the archive. Symbol* sym = (Symbol*)ref->obj(); @@ -661,7 +663,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s oldtop = dump_region->top(); if (src_info->msotype() == MetaspaceObj::ClassType) { - // Save a pointer immediate in front of an InstanceKlass, so + // Allocate space for a pointer directly in front of the future InstanceKlass, so // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo* // without building another hashtable. See RunTimeClassInfo::get_for() // in systemDictionaryShared.cpp. 
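Aside, not part of the patch: the ArchiveBuilder hunks below place buffered Klass objects at (1 << precomputed_narrow_klass_shift())-aligned offsets and precompute their narrow Klass IDs with encode_not_null_without_asserts. The alignment matters because encoding discards the low shift bits, so an ID only round-trips exactly for aligned addresses. A stand-alone model of that constraint; the free functions are illustrative, not the HotSpot API.

#include <cstdint>

uint32_t encode_nk(uint64_t klass_addr, uint64_t base, unsigned shift) {
  return static_cast<uint32_t>((klass_addr - base) >> shift);
}
uint64_t decode_nk(uint32_t nk, uint64_t base, unsigned shift) {
  return base + (static_cast<uint64_t>(nk) << shift);
}
// Exact round-trip requires (klass_addr - base) to be a multiple of 1 << shift,
// which the builder guarantees by allocating buffered Klasses with that alignment.
bool roundtrips(uint64_t klass_addr, uint64_t base, unsigned shift) {
  return decode_nk(encode_nk(klass_addr, base, shift), base, shift) == klass_addr;
}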
@@ -670,8 +672,19 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); dump_region->allocate(sizeof(address)); } + // Allocate space for the future InstanceKlass with proper alignment + const size_t alignment = +#ifdef _LP64 + UseCompressedClassPointers ? + nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()) : + SharedSpaceObjectAlignment; +#else + SharedSpaceObjectAlignment; +#endif + dest = dump_region->allocate(bytes, alignment); + } else { + dest = dump_region->allocate(bytes); } - dest = dump_region->allocate(bytes); newtop = dump_region->top(); memcpy(dest, src, bytes); @@ -702,6 +715,8 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s src_info->set_buffered_addr((address)dest); _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only()); + + DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only())); } // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are @@ -780,6 +795,15 @@ void ArchiveBuilder::make_klasses_shareable() { const char* generated = ""; Klass* k = get_buffered_addr(klasses()->at(i)); k->remove_java_mirror(); +#ifdef _LP64 + if (UseCompactObjectHeaders) { + Klass* requested_k = to_requested(k); + address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start + const int narrow_klass_shift = precomputed_narrow_klass_shift(); + narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift); + k->set_prototype_header(markWord::prototype().set_narrow_klass(nk)); + } +#endif //_LP64 if (k->is_objArray_klass()) { // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info // on their array classes. @@ -884,9 +908,15 @@ narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) { assert(CDSConfig::is_dumping_heap(), "sanity"); k = get_buffered_klass(k); Klass* requested_k = to_requested(k); + const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift(); +#ifdef ASSERT + const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift)); + assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k)); +#endif address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start - const int narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; - return CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift); + // Note: use the "raw" version of encode that takes explicit narrow klass base and shift. Don't use any + // of the variants that do sanity checks, nor any of those that use the current - dump - JVM's encoding setting. + return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift); } #endif // INCLUDE_CDS_JAVA_HEAP @@ -966,6 +996,20 @@ class RelocateBufferToRequested : public BitMapClosure { } }; +#ifdef _LP64 +int ArchiveBuilder::precomputed_narrow_klass_shift() { + // Legacy Mode: + // We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0. + // CompactObjectHeader Mode: + // narrowKlass is much smaller, and we use the highest possible shift value to later get the maximum + // Klass encoding range. 
+ // + // Note that all of this may change in the future, if we decide to correct the pre-calculated + // narrow Klass IDs at archive load time. + assert(UseCompressedClassPointers, "Only needed for compressed class pointers"); + return UseCompactObjectHeaders ? CompressedKlassPointers::max_shift() : 0; +} +#endif // _LP64 void ArchiveBuilder::relocate_to_requested() { ro_region()->pack(); diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index 4b3aa72611e..f306e4676b3 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -27,6 +27,7 @@ #include "cds/archiveUtils.hpp" #include "cds/dumpAllocStats.hpp" +#include "memory/metaspace.hpp" #include "memory/metaspaceClosure.hpp" #include "oops/array.hpp" #include "oops/klass.hpp" @@ -43,9 +44,9 @@ class Klass; class MemRegion; class Symbol; -// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes. -// We enforce the same alignment rule in blocks allocated from the shared space. -const int SharedSpaceObjectAlignment = KlassAlignmentInBytes; +// The minimum alignment for non-Klass objects inside the CDS archive. Klass objects need +// to follow CompressedKlassPointers::klass_alignment_in_bytes(). +constexpr size_t SharedSpaceObjectAlignment = Metaspace::min_allocation_alignment_bytes; // Overview of CDS archive creation (for both static and dynamic dump): // @@ -468,6 +469,29 @@ public: void print_stats(); void report_out_of_space(const char* name, size_t needed_bytes); + +#ifdef _LP64 + // The CDS archive contains pre-computed narrow Klass IDs. It carries them in the headers of + // archived heap objects. With +UseCompactObjectHeaders, it also carries them in prototypes + // in Klass. + // When generating the archive, these narrow Klass IDs are computed using the following scheme: + // 1) The future encoding base is assumed to point to the first address of the generated mapping. + // That means that at runtime, the narrow Klass encoding must be set up with base pointing to + // the start address of the mapped CDS metadata archive (wherever that may be). This precludes + // zero-based encoding. + // 2) The shift must be large enough to result in an encoding range that covers the future assumed + // runtime Klass range. That future Klass range will contain both the CDS metadata archive and + // the future runtime class space. Since we do not know the size of the future class space, we + // need to choose an encoding base/shift combination that will result in a "large enough" size. + // The details depend on whether we use compact object headers or legacy object headers. + // In Legacy Mode, a narrow Klass ID is 32 bits. This gives us an encoding range size of 4G even + // with shift = 0, which is all we need. Therefore, we use a shift of 0 for pre-calculating the + // narrow Klass IDs. + // TinyClassPointer Mode: + // We use the highest possible shift value to maximize the encoding range size.
+ static int precomputed_narrow_klass_shift(); +#endif // _LP64 + }; #endif // SHARE_CDS_ARCHIVEBUILDER_HPP diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp index a55325978de..636b368117d 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.cpp +++ b/src/hotspot/share/cds/archiveHeapWriter.cpp @@ -186,8 +186,12 @@ objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_ memset(mem, 0, objArrayOopDesc::object_size(element_count)); // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize. - oopDesc::set_mark(mem, markWord::prototype()); - oopDesc::release_set_klass(mem, Universe::objectArrayKlass()); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header()); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + oopDesc::release_set_klass(mem, Universe::objectArrayKlass()); + } arrayOopDesc::set_length(mem, element_count); return objArrayOop(cast_to_oop(mem)); } @@ -350,9 +354,13 @@ HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, s Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass HeapWord* mem = offset_to_buffered_address(_buffer_used); memset(mem, 0, fill_bytes); - oopDesc::set_mark(mem, markWord::prototype()); narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak); - cast_to_oop(mem)->set_narrow_klass(nk); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk)); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + cast_to_oop(mem)->set_narrow_klass(nk); + } arrayOopDesc::set_length(mem, array_length); return mem; } @@ -556,7 +564,11 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop
(requested_obj)); oop fake_oop = cast_to_oop(buffered_addr); - fake_oop->set_narrow_klass(nk); + if (UseCompactObjectHeaders) { + fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk)); + } else { + fake_oop->set_narrow_klass(nk); + } if (src_obj == nullptr) { return; @@ -565,7 +577,11 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s // in the shared heap. if (!src_obj->fast_no_hash_check()) { intptr_t src_hash = src_obj->identity_hash(); - fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); + if (UseCompactObjectHeaders) { + fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash)); + } else { + fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash)); + } assert(fake_oop->mark().is_unlocked(), "sanity"); DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash()); diff --git a/src/hotspot/share/cds/archiveHeapWriter.hpp b/src/hotspot/share/cds/archiveHeapWriter.hpp index 29ea50ba5fe..70c1207bb91 100644 --- a/src/hotspot/share/cds/archiveHeapWriter.hpp +++ b/src/hotspot/share/cds/archiveHeapWriter.hpp @@ -240,17 +240,6 @@ public: static oop buffered_addr_to_source_obj(address buffered_addr); static address buffered_addr_to_requested_addr(address buffered_addr); - // Archived heap object headers carry pre-computed narrow Klass ids calculated with the - // following scheme: - // 1) the encoding base must be the mapping start address. - // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range. - // That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum - // size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at - // runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G. - // Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly - // at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0). 
- static constexpr int precomputed_narrow_klass_shift = 0; - }; #endif // INCLUDE_CDS_JAVA_HEAP #endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index 430de362f9b..c7282f7d97c 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -241,9 +241,10 @@ void DumpRegion::commit_to(char* newtop) { which, commit, _vs->actual_committed_size(), _vs->high()); } - -char* DumpRegion::allocate(size_t num_bytes) { - char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment); +char* DumpRegion::allocate(size_t num_bytes, size_t alignment) { + // Always align to at least minimum alignment + alignment = MAX2(SharedSpaceObjectAlignment, alignment); + char* p = (char*)align_up(_top, alignment); char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment); expand_top_to(newtop); memset(p, 0, newtop - p); @@ -343,7 +344,7 @@ void ReadClosure::do_tag(int tag) { int old_tag; old_tag = (int)(intptr_t)nextPtr(); // do_int(&old_tag); - assert(tag == old_tag, "old tag doesn't match"); + assert(tag == old_tag, "tag doesn't match (%d, expected %d)", old_tag, tag); FileMapInfo::assert_mark(tag == old_tag); } diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp index f00895078ef..7fb8e538084 100644 --- a/src/hotspot/share/cds/archiveUtils.hpp +++ b/src/hotspot/share/cds/archiveUtils.hpp @@ -158,10 +158,11 @@ private: public: DumpRegion(const char* name, uintx max_delta = 0) : _name(name), _base(nullptr), _top(nullptr), _end(nullptr), - _max_delta(max_delta), _is_packed(false) {} + _max_delta(max_delta), _is_packed(false), + _rs(NULL), _vs(NULL) {} char* expand_top_to(char* newtop); - char* allocate(size_t num_bytes); + char* allocate(size_t num_bytes, size_t alignment = 0); void append_intptr_t(intptr_t n, bool need_to_mark = false) NOT_CDS_RETURN; diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index 9e8a46e105e..691a1983732 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -82,13 +82,20 @@ char* CDSConfig::default_archive_path() { os::jvm_path(jvm_path, sizeof(jvm_path)); char *end = strrchr(jvm_path, *os::file_separator()); if (end != nullptr) *end = '\0'; - size_t jvm_path_len = strlen(jvm_path); - size_t file_sep_len = strlen(os::file_separator()); - const size_t len = jvm_path_len + file_sep_len + 20; - _default_archive_path = NEW_C_HEAP_ARRAY(char, len, mtArguments); - jio_snprintf(_default_archive_path, len, - LP64_ONLY(!UseCompressedOops ? 
"%s%sclasses_nocoops.jsa":) "%s%sclasses.jsa", - jvm_path, os::file_separator()); + stringStream tmp; + tmp.print("%s%sclasses", jvm_path, os::file_separator()); +#ifdef _LP64 + if (!UseCompressedOops) { + tmp.print_raw("_nocoops"); + } + if (UseCompactObjectHeaders) { + // Note that generation of xxx_coh.jsa variants require + // --enable-cds-archive-coh at build time + tmp.print_raw("_coh"); + } +#endif + tmp.print_raw(".jsa"); + _default_archive_path = os::strdup(tmp.base()); } return _default_archive_path; } diff --git a/src/hotspot/share/cds/dumpAllocStats.cpp b/src/hotspot/share/cds/dumpAllocStats.cpp index a1dac41e322..25351f78d84 100644 --- a/src/hotspot/share/cds/dumpAllocStats.cpp +++ b/src/hotspot/share/cds/dumpAllocStats.cpp @@ -115,3 +115,15 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) { percent_of(_num_method_cp_entries_archived, _num_method_cp_entries), _num_method_cp_entries_reverted); } + +#ifdef ASSERT +void DumpAllocStats::verify(int expected_byte_size, bool read_only) const { + int bytes = 0; + const int what = (int)(read_only ? RO : RW); + for (int type = 0; type < int(_number_of_types); type ++) { + bytes += _bytes[what][type]; + } + assert(bytes == expected_byte_size, "counter mismatch (%s: %d vs %d)", + (read_only ? "RO" : "RW"), bytes, expected_byte_size); +} +#endif // ASSERT diff --git a/src/hotspot/share/cds/dumpAllocStats.hpp b/src/hotspot/share/cds/dumpAllocStats.hpp index 0332be73120..2ff81c52392 100644 --- a/src/hotspot/share/cds/dumpAllocStats.hpp +++ b/src/hotspot/share/cds/dumpAllocStats.hpp @@ -135,6 +135,9 @@ public: } void print_stats(int ro_all, int rw_all); + + DEBUG_ONLY(void verify(int expected_byte_size, bool read_only) const); + }; #endif // SHARE_CDS_DUMPALLOCSTATS_HPP diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp index 469f93d1e0c..5e88b3c47e4 100644 --- a/src/hotspot/share/cds/filemap.cpp +++ b/src/hotspot/share/cds/filemap.cpp @@ -55,6 +55,7 @@ #include "nmt/memTracker.hpp" #include "oops/compressedOops.hpp" #include "oops/compressedOops.inline.hpp" +#include "oops/compressedKlass.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -204,6 +205,7 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment, _core_region_alignment = core_region_alignment; _obj_alignment = ObjectAlignmentInBytes; _compact_strings = CompactStrings; + _compact_headers = UseCompactObjectHeaders; if (CDSConfig::is_dumping_heap()) { _narrow_oop_mode = CompressedOops::mode(); _narrow_oop_base = CompressedOops::base(); @@ -211,6 +213,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment, } _compressed_oops = UseCompressedOops; _compressed_class_ptrs = UseCompressedClassPointers; + if (UseCompressedClassPointers) { +#ifdef _LP64 + _narrow_klass_pointer_bits = CompressedKlassPointers::narrow_klass_pointer_bits(); + _narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift(); +#endif + } else { + _narrow_klass_pointer_bits = _narrow_klass_shift = -1; + } _max_heap_size = MaxHeapSize; _use_optimized_module_handling = CDSConfig::is_using_optimized_module_handling(); _has_full_module_graph = CDSConfig::is_dumping_full_module_graph(); @@ -269,10 +279,13 @@ void FileMapHeader::print(outputStream* st) { st->print_cr("- narrow_oop_base: " INTPTR_FORMAT, p2i(_narrow_oop_base)); st->print_cr("- narrow_oop_shift %d", _narrow_oop_shift); st->print_cr("- compact_strings: %d", _compact_strings); + st->print_cr("- 
compact_headers: %d", _compact_headers); st->print_cr("- max_heap_size: " UINTX_FORMAT, _max_heap_size); st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode); st->print_cr("- compressed_oops: %d", _compressed_oops); st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs); + st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits); + st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift); st->print_cr("- cloned_vtables_offset: " SIZE_FORMAT_X, _cloned_vtables_offset); st->print_cr("- early_serialized_data_offset: " SIZE_FORMAT_X, _early_serialized_data_offset); st->print_cr("- serialized_data_offset: " SIZE_FORMAT_X, _serialized_data_offset); @@ -2083,22 +2096,23 @@ bool FileMapInfo::can_use_heap_region() { } // We pre-compute narrow Klass IDs with the runtime mapping start intended to be the base, and a shift of - // ArchiveHeapWriter::precomputed_narrow_klass_shift. We enforce this encoding at runtime (see + // ArchiveBuilder::precomputed_narrow_klass_shift. We enforce this encoding at runtime (see // CompressedKlassPointers::initialize_for_given_encoding()). Therefore, the following assertions must // hold: address archive_narrow_klass_base = (address)header()->mapped_base_address(); - const int archive_narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; + const int archive_narrow_klass_pointer_bits = header()->narrow_klass_pointer_bits(); + const int archive_narrow_klass_shift = header()->narrow_klass_shift(); log_info(cds)("CDS archive was created with max heap size = " SIZE_FORMAT "M, and the following configuration:", max_heap_size()/M); - log_info(cds)(" narrow_klass_base at mapping start address, narrow_klass_shift = %d", - archive_narrow_klass_shift); + log_info(cds)(" narrow_klass_base at mapping start address, narrow_klass_pointer_bits = %d, narrow_klass_shift = %d", + archive_narrow_klass_pointer_bits, archive_narrow_klass_shift); log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d", narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift()); log_info(cds)("The current max heap size = " SIZE_FORMAT "M, G1HeapRegion::GrainBytes = " SIZE_FORMAT, MaxHeapSize/M, G1HeapRegion::GrainBytes); - log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d", - p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift()); + log_info(cds)(" narrow_klass_base = " PTR_FORMAT ", narrow_klass_pointer_bits = %d, narrow_klass_shift = %d", + p2i(CompressedKlassPointers::base()), CompressedKlassPointers::narrow_klass_pointer_bits(), CompressedKlassPointers::shift()); log_info(cds)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d", CompressedOops::mode(), p2i(CompressedOops::base()), CompressedOops::shift()); log_info(cds)(" heap range = [" PTR_FORMAT " - " PTR_FORMAT "]", @@ -2107,10 +2121,35 @@ bool FileMapInfo::can_use_heap_region() { UseCompressedOops ? p2i(CompressedOops::end()) : UseG1GC ? 
p2i((address)G1CollectedHeap::heap()->reserved().end()) : 0L); - assert(archive_narrow_klass_base == CompressedKlassPointers::base(), "Unexpected encoding base encountered " - "(" PTR_FORMAT ", expected " PTR_FORMAT ")", p2i(CompressedKlassPointers::base()), p2i(archive_narrow_klass_base)); - assert(archive_narrow_klass_shift == CompressedKlassPointers::shift(), "Unexpected encoding shift encountered " - "(%d, expected %d)", CompressedKlassPointers::shift(), archive_narrow_klass_shift); + int err = 0; + if ( archive_narrow_klass_base != CompressedKlassPointers::base() || + (err = 1, archive_narrow_klass_pointer_bits != CompressedKlassPointers::narrow_klass_pointer_bits()) || + (err = 2, archive_narrow_klass_shift != CompressedKlassPointers::shift()) ) { + stringStream ss; + switch (err) { + case 0: + ss.print("Unexpected encoding base encountered (" PTR_FORMAT ", expected " PTR_FORMAT ")", + p2i(CompressedKlassPointers::base()), p2i(archive_narrow_klass_base)); + break; + case 1: + ss.print("Unexpected narrow Klass bit length encountered (%d, expected %d)", + CompressedKlassPointers::narrow_klass_pointer_bits(), archive_narrow_klass_pointer_bits); + break; + case 2: + ss.print("Unexpected narrow Klass shift encountered (%d, expected %d)", + CompressedKlassPointers::shift(), archive_narrow_klass_shift); + break; + default: + ShouldNotReachHere(); + }; + LogTarget(Info, cds) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print_raw(ss.base()); + header()->print(&ls); + } + assert(false, "%s", ss.base()); + } return true; } @@ -2482,14 +2521,22 @@ bool FileMapHeader::validate() { "for testing purposes only and should not be used in a production environment"); } - log_info(cds)("Archive was created with UseCompressedOops = %d, UseCompressedClassPointers = %d", - compressed_oops(), compressed_class_pointers()); + log_info(cds)("Archive was created with UseCompressedOops = %d, UseCompressedClassPointers = %d, UseCompactObjectHeaders = %d", + compressed_oops(), compressed_class_pointers(), compact_headers()); if (compressed_oops() != UseCompressedOops || compressed_class_pointers() != UseCompressedClassPointers) { - log_info(cds)("Unable to use shared archive.\nThe saved state of UseCompressedOops and UseCompressedClassPointers is " + log_warning(cds)("Unable to use shared archive.\nThe saved state of UseCompressedOops and UseCompressedClassPointers is " "different from runtime, CDS will be disabled."); return false; } + if (compact_headers() != UseCompactObjectHeaders) { + log_warning(cds)("Unable to use shared archive.\nThe shared archive file's UseCompactObjectHeaders setting (%s)" + " does not equal the current UseCompactObjectHeaders setting (%s).", + _compact_headers ? "enabled" : "disabled", + UseCompactObjectHeaders ? 
"enabled" : "disabled"); + return false; + } + if (!_use_optimized_module_handling) { CDSConfig::stop_using_optimized_module_handling(); log_info(cds)("optimized module handling: disabled because archive was created without optimized module handling"); diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp index f3aa389c30e..6a15ff03c3a 100644 --- a/src/hotspot/share/cds/filemap.hpp +++ b/src/hotspot/share/cds/filemap.hpp @@ -188,10 +188,13 @@ private: address _narrow_oop_base; // compressed oop encoding base int _narrow_oop_shift; // compressed oop encoding shift bool _compact_strings; // value of CompactStrings + bool _compact_headers; // value of UseCompactObjectHeaders uintx _max_heap_size; // java max heap size during dumping CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode bool _compressed_oops; // save the flag UseCompressedOops bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers + int _narrow_klass_pointer_bits; // save number of bits in narrowKlass + int _narrow_klass_shift; // save shift width used to pre-compute narrowKlass IDs in archived heap objects size_t _cloned_vtables_offset; // The address of the first cloned vtable size_t _early_serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize() size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize() @@ -259,6 +262,7 @@ public: address narrow_oop_base() const { return _narrow_oop_base; } int narrow_oop_shift() const { return _narrow_oop_shift; } bool compact_strings() const { return _compact_strings; } + bool compact_headers() const { return _compact_headers; } uintx max_heap_size() const { return _max_heap_size; } CompressedOops::Mode narrow_oop_mode() const { return _narrow_oop_mode; } char* cloned_vtables() const { return from_mapped_offset(_cloned_vtables_offset); } @@ -271,6 +275,8 @@ public: bool has_non_jar_in_classpath() const { return _has_non_jar_in_classpath; } bool compressed_oops() const { return _compressed_oops; } bool compressed_class_pointers() const { return _compressed_class_ptrs; } + int narrow_klass_pointer_bits() const { return _narrow_klass_pointer_bits; } + int narrow_klass_shift() const { return _narrow_klass_shift; } HeapRootSegments heap_root_segments() const { return _heap_root_segments; } bool has_full_module_graph() const { return _has_full_module_graph; } size_t heap_oopmap_start_pos() const { return _heap_oopmap_start_pos; } diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index 1d17fb7cfe3..3cb1374efb1 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -87,6 +87,7 @@ #include "utilities/align.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/defaultStream.hpp" +#include "utilities/macros.hpp" #include "utilities/ostream.hpp" #include "utilities/resourceHash.hpp" @@ -1247,19 +1248,25 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File address cds_base = (address)static_mapinfo->mapped_base(); address ccs_end = (address)class_space_rs.end(); assert(ccs_end > cds_base, "Sanity check"); -#if INCLUDE_CDS_JAVA_HEAP - // We archived objects with pre-computed narrow Klass id. Set up encoding such that these Ids stay valid. 
- address precomputed_narrow_klass_base = cds_base; - const int precomputed_narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift; - CompressedKlassPointers::initialize_for_given_encoding( - cds_base, ccs_end - cds_base, // Klass range - precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveHeapWriter + if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) { + // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time: + // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP) + // - every archived Klass' prototype (only if +UseCompactObjectHeaders) + // + // In order for those IDs to still be valid, we need to dictate base and shift: base should be the + // mapping start, shift the shift used at archive generation time. + address precomputed_narrow_klass_base = cds_base; + const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift(); + CompressedKlassPointers::initialize_for_given_encoding( + cds_base, ccs_end - cds_base, // Klass range + precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder ); -#else - CompressedKlassPointers::initialize ( - cds_base, ccs_end - cds_base // Klass range - ); -#endif // INCLUDE_CDS_JAVA_HEAP + } else { + // Let the JVM freely choose encoding base and shift + CompressedKlassPointers::initialize ( + cds_base, ccs_end - cds_base // Klass range + ); + } // map_or_load_heap_region() compares the current narrow oop and klass encodings // with the archived ones, so it must be done after all encodings are determined. static_mapinfo->map_or_load_heap_region(); @@ -1314,7 +1321,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File // // If UseCompressedClassPointers=1, the range encompassing both spaces will be // suitable to en/decode narrow Klass pointers: the base will be valid for -// encoding, the range [Base, End) not surpass KlassEncodingMetaspaceMax. +// encoding, and the range [Base, End) will not surpass the maximum range for that encoding. // // Return: // @@ -1435,7 +1442,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma } else { // We did not manage to reserve at the preferred address, or were instructed to relocate. In that // case we reserve wherever possible, but the start address needs to be encodable as narrow Klass - // encoding base since the archived heap objects contain nKlass IDs pre-calculated toward the start + // encoding base since the archived heap objects contain narrow Klass IDs pre-calculated toward the start // of the shared Metaspace. That prevents us from using zero-based encoding and therefore we won't // try allocating in low-address regions. 
total_space_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size, false /* optimize_for_zero_base */); diff --git a/src/hotspot/share/ci/ciKlass.cpp b/src/hotspot/share/ci/ciKlass.cpp index efdd2512f90..f65d4a0311c 100644 --- a/src/hotspot/share/ci/ciKlass.cpp +++ b/src/hotspot/share/ci/ciKlass.cpp @@ -258,3 +258,23 @@ const char* ciKlass::external_name() const { return get_Klass()->external_name(); ) } + +// ------------------------------------------------------------------ +// ciKlass::prototype_header_offset +juint ciKlass::prototype_header_offset() { + assert(is_loaded(), "must be loaded"); + + VM_ENTRY_MARK; + Klass* this_klass = get_Klass(); + return in_bytes(this_klass->prototype_header_offset()); +} + +// ------------------------------------------------------------------ +// ciKlass::prototype_header +uintptr_t ciKlass::prototype_header() { + assert(is_loaded(), "must be loaded"); + + VM_ENTRY_MARK; + Klass* this_klass = get_Klass(); + return (uintptr_t)this_klass->prototype_header().to_pointer(); +} diff --git a/src/hotspot/share/ci/ciKlass.hpp b/src/hotspot/share/ci/ciKlass.hpp index 10d8395ed7f..7b8d871eb56 100644 --- a/src/hotspot/share/ci/ciKlass.hpp +++ b/src/hotspot/share/ci/ciKlass.hpp @@ -139,6 +139,9 @@ public: void print_name_on(outputStream* st); const char* external_name() const; + + juint prototype_header_offset(); + uintptr_t prototype_header(); }; #endif // SHARE_CI_CIKLASS_HPP diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index 93dd0af65e7..eb2d1b684d2 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -5836,6 +5836,15 @@ bool ClassFileParser::is_java_lang_ref_Reference_subclass() const { return _super_klass->reference_type() != REF_NONE; } +// Returns true if the future Klass will need to be addressable with a narrow Klass ID. +bool ClassFileParser::klass_needs_narrow_id() const { + // Classes that are never instantiated need no narrow Klass Id, since the + // only point of having a narrow id is to put it into an object header. Keeping + // never instantiated classes out of class space lessens the class space pressure. + // For more details, see JDK-8338526. + return !is_interface() && !is_abstract(); +} + // ---------------------------------------------------------------------------- // debugging diff --git a/src/hotspot/share/classfile/classFileParser.hpp b/src/hotspot/share/classfile/classFileParser.hpp index a8a388f8ded..18cdeec7d8e 100644 --- a/src/hotspot/share/classfile/classFileParser.hpp +++ b/src/hotspot/share/classfile/classFileParser.hpp @@ -511,6 +511,10 @@ class ClassFileParser { bool is_interface() const { return _access_flags.is_interface(); } bool is_abstract() const { return _access_flags.is_abstract(); } + // Returns true if the Klass to be generated will need to be addressable + // with a narrow Klass ID. 
+ bool klass_needs_narrow_id() const; + ClassLoaderData* loader_data() const { return _loader_data; } const Symbol* class_name() const { return _class_name; } const InstanceKlass* super_klass() const { return _super_klass; } diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 5b4e6163bc1..08f224d969f 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -59,6 +59,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" @@ -83,6 +84,16 @@ DumpTimeLambdaProxyClassDictionary* SystemDictionaryShared::_dumptime_lambda_pro // Used by NoClassLoadingMark DEBUG_ONLY(bool SystemDictionaryShared::_class_loading_may_happen = true;) +#ifdef ASSERT +static void check_klass_after_loading(const Klass* k) { +#ifdef _LP64 + if (k != nullptr && UseCompressedClassPointers && k->needs_narrow_id()) { + CompressedKlassPointers::check_encodable(k); + } +#endif +} +#endif + InstanceKlass* SystemDictionaryShared::load_shared_class_for_builtin_loader( Symbol* class_name, Handle class_loader, TRAPS) { assert(CDSConfig::is_using_archive(), "must be"); @@ -425,6 +436,9 @@ InstanceKlass* SystemDictionaryShared::find_or_load_shared_class( } } } + + DEBUG_ONLY(check_klass_after_loading(k);) + return k; } @@ -1345,7 +1359,7 @@ InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) { name); if (record != nullptr) { assert(!record->klass()->is_hidden(), "hidden class cannot be looked up by name"); - assert(check_alignment(record->klass()), "Address not aligned"); + DEBUG_ONLY(check_klass_after_loading(record->klass());) // We did not save the classfile data of the generated LambdaForm invoker classes, // so we cannot support CLFH for such classes. 
if (record->klass()->is_generated_shared_class() && JvmtiExport::should_post_class_file_load_hook()) { diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp index 3d4ce0d780d..35dfbb7290e 100644 --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -34,6 +34,7 @@ #include "gc/g1/g1HeapRegionRemSet.hpp" #include "gc/g1/g1HeapVerifier.hpp" #include "gc/shared/cardTable.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/workerPolicy.hpp" #include "runtime/globals.hpp" @@ -247,6 +248,7 @@ void G1Arguments::initialize() { void G1Arguments::initialize_heap_flags_and_sizes() { GCArguments::initialize_heap_flags_and_sizes(); + FullGCForwarding::initialize_flags(heap_reserved_size_bytes()); } CollectedHeap* G1Arguments::create_heap() { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 57236d6f6db..daf7eb5371b 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -77,6 +77,7 @@ #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp" #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/concurrentGCBreakpoints.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcBehaviours.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" @@ -85,7 +86,6 @@ #include "gc/shared/isGCActiveMark.hpp" #include "gc/shared/locationPrinter.inline.hpp" #include "gc/shared/oopStorageParState.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referenceProcessor.inline.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/taskqueue.inline.hpp" @@ -1435,6 +1435,8 @@ jint G1CollectedHeap::initialize() { G1InitLogger::print(); + FullGCForwarding::initialize(heap_rs.region()); + return JNI_OK; } diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp index 05f66959243..bee3656ead5 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1FullGCCompactTask.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" @@ -41,7 +42,7 @@ void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) { size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { size_t size = obj->size(); - if (obj->is_forwarded()) { + if (FullGCForwarding::is_forwarded(obj)) { G1FullGCCompactTask::copy_object_to_new_location(obj); } @@ -52,13 +53,13 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) { } void G1FullGCCompactTask::copy_object_to_new_location(oop obj) { - assert(obj->is_forwarded(), "Sanity!"); - assert(obj->forwardee() != obj, "Object must have a new location"); + assert(FullGCForwarding::is_forwarded(obj), "Sanity!"); + assert(FullGCForwarding::forwardee(obj) != obj, "Object must have a new location"); size_t size = obj->size(); // Copy object and reinit its mark. HeapWord* obj_addr = cast_from_oop(obj); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(FullGCForwarding::forwardee(obj)); Copy::aligned_conjoint_words(obj_addr, destination, size); // There is no need to transform stack chunks - marking already did that. @@ -121,7 +122,7 @@ void G1FullGCCompactTask::compact_humongous_obj(G1HeapRegion* src_hr) { size_t word_size = obj->size(); uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(word_size); - HeapWord* destination = cast_from_oop(obj->forwardee()); + HeapWord* destination = cast_from_oop(FullGCForwarding::forwardee(obj)); assert(collector()->mark_bitmap()->is_marked(obj), "Should only compact marked objects"); collector()->mark_bitmap()->clear(obj); diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp index 019484c810a..ddd1b7c0999 100644 --- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1HeapRegion.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/debug.hpp" @@ -106,10 +107,10 @@ void G1FullGCCompactionPoint::forward(oop object, size_t size) { if (!object->is_forwarded()) { preserved_stack()->push_if_necessary(object, object->mark()); } - object->forward_to(cast_to_oop(_compaction_top)); - assert(object->is_forwarded(), "must be forwarded"); + FullGCForwarding::forward_to(object, cast_to_oop(_compaction_top)); + assert(FullGCForwarding::is_forwarded(object), "must be forwarded"); } else { - assert(!object->is_forwarded(), "must not be forwarded"); + assert(!FullGCForwarding::is_forwarded(object), "must not be forwarded"); } // Update compaction values. 
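The G1 full-GC hunks above route forwarding through FullGCForwarding rather than storing a raw forwardee pointer in the mark word. With compact headers the mark word also carries the narrow Klass ID, so a full-width pointer store would clobber it; instead the forwardee can be encoded as an offset from the heap base that fits below the Klass bits. A rough, self-contained C++ sketch of that idea, where the names, tag value and bit positions are illustrative assumptions rather than the HotSpot implementation:

#include <cassert>
#include <cstdint>

namespace toy_forwarding {
  inline uintptr_t heap_base = 0;            // assumed: set once from the reserved heap range
  constexpr uint64_t kForwardedTag = 0x3;    // assumed: low two mark bits mean "forwarded"

  inline void forward_to(uint64_t& mark, uintptr_t new_addr) {
    uint64_t word_offset = (new_addr - heap_base) >> 3;  // 8-byte words keep the offset small
    mark = (word_offset << 2) | kForwardedTag;           // offset sits just above the tag bits
  }

  inline uintptr_t forwardee(uint64_t mark) {
    assert((mark & kForwardedTag) == kForwardedTag && "not forwarded");
    return heap_base + ((mark >> 2) << 3);
  }
}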
@@ -172,8 +173,8 @@ void G1FullGCCompactionPoint::forward_humongous(G1HeapRegion* hr) { preserved_stack()->push_if_necessary(obj, obj->mark()); G1HeapRegion* dest_hr = _compaction_regions->at(range_begin); - obj->forward_to(cast_to_oop(dest_hr->bottom())); - assert(obj->is_forwarded(), "Object must be forwarded!"); + FullGCForwarding::forward_to(obj, cast_to_oop(dest_hr->bottom())); + assert(FullGCForwarding::is_forwarded(obj), "Object must be forwarded!"); // Add the humongous object regions to the compaction point. add_humongous(hr); diff --git a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp index f10f884b242..b20593ac290 100644 --- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCMarker.inline.hpp" #include "gc/g1/g1HeapRegionRemSet.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -65,8 +66,8 @@ template inline void G1AdjustClosure::adjust_pointer(T* p) { return; } - if (obj->is_forwarded()) { - oop forwardee = obj->forwardee(); + if (FullGCForwarding::is_forwarded(obj)) { + oop forwardee = FullGCForwarding::forwardee(obj); // Forwarded, just update. assert(G1CollectedHeap::heap()->is_in_reserved(forwardee), "should be in object space"); RawAccess::oop_store(p, forwardee); diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp index 2fb61cc5934..1d2a023ed3a 100644 --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "gc/g1/g1FullGCCompactionPoint.hpp" #include "gc/g1/g1FullGCScope.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" void G1DetermineCompactionQueueClosure::free_empty_humongous_region(G1HeapRegion* hr) { _g1h->free_humongous_region(hr, nullptr); @@ -114,10 +115,10 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(G1HeapRegion* hr) } inline size_t G1SerialRePrepareClosure::apply(oop obj) { - if (obj->is_forwarded()) { + if (FullGCForwarding::is_forwarded(obj)) { // We skip objects compiled into the first region or // into regions not part of the serial compaction point. 
- if (cast_from_oop(obj->forwardee()) < _dense_prefix_top) { + if (cast_from_oop(FullGCForwarding::forwardee(obj)) < _dense_prefix_top) { return obj->size(); } } diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp index 32a56d71205..9e48a16018e 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,7 +105,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) : _gc_par_phases[UpdateDerivedPointers] = new WorkerDataArray("UpdateDerivedPointers", "Update Derived Pointers (ms):", max_gc_threads); #endif _gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads); - _gc_par_phases[RestorePreservedMarks] = new WorkerDataArray("RestorePreservedMarks", "Restore Preserved Marks (ms):", max_gc_threads); _gc_par_phases[ProcessEvacuationFailedRegions] = new WorkerDataArray("ProcessEvacuationFailedRegions", "Process Evacuation Failed Regions (ms):", max_gc_threads); _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards); @@ -512,7 +511,6 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed debug_time("Post Evacuate Cleanup 2", _cur_post_evacuate_cleanup_2_time_ms); if (evacuation_failed) { debug_phase(_gc_par_phases[RecalculateUsed], 1); - debug_phase(_gc_par_phases[RestorePreservedMarks], 1); debug_phase(_gc_par_phases[ProcessEvacuationFailedRegions], 1); } #if COMPILER2_OR_JVMCI diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp index 40abfd60533..a54ef431abd 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -87,7 +87,6 @@ class G1GCPhaseTimes : public CHeapObj { UpdateDerivedPointers, #endif EagerlyReclaimHumongousObjects, - RestorePreservedMarks, ProcessEvacuationFailedRegions, ResetMarkingState, NoteStartOfMark, diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp index 26cc88de0be..5b3bbedfeb2 100644 --- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp +++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp @@ -228,7 +228,7 @@ void G1ParCopyClosure::do_oop_work(T* p) { oop forwardee; markWord m = obj->mark(); if (m.is_forwarded()) { - forwardee = m.forwardee(); + forwardee = obj->forwardee(m); } else { forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m); } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index f81c3241a1a..ad924b2fad4 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -37,7 +37,6 @@ #include "gc/shared/continuationGCSupport.inline.hpp" #include "gc/shared/partialArrayState.hpp" #include "gc/shared/partialArrayTaskStepper.inline.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "memory/allocation.inline.hpp" @@ -59,7 +58,6 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, G1RedirtyCardsQueueSet* rdcqs, - PreservedMarks* preserved_marks, uint worker_id, uint num_workers, G1CollectionSet* collection_set, @@ -90,7 +88,6 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, _numa(g1h->numa()), _obj_alloc_stat(nullptr), ALLOCATION_FAILURE_INJECTOR_ONLY(_allocation_failure_inject_counter(0) COMMA) - _preserved_marks(preserved_marks), _evacuation_failed_info(), _evac_failure_regions(evac_failure_regions), _evac_failure_enqueued_cards(0) @@ -216,7 +213,7 @@ void G1ParScanThreadState::do_oop_evac(T* p) { markWord m = obj->mark(); if (m.is_forwarded()) { - obj = m.forwardee(); + obj = obj->forwardee(m); } else { obj = do_copy_to_survivor_space(region_attr, obj, m); } @@ -232,7 +229,6 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state) { #ifdef ASSERT oop from_obj = state->source(); assert(_g1h->is_in_reserved(from_obj), "must be in heap."); - assert(from_obj->is_objArray(), "must be obj array"); assert(from_obj->is_forwarded(), "must be forwarded"); assert(from_obj != to_obj, "should not be chunking self-forwarded objects"); assert(to_obj->is_objArray(), "must be obj array"); @@ -265,7 +261,6 @@ MAYBE_INLINE_EVACUATION void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr, oop from_obj, oop to_obj) { - assert(from_obj->is_objArray(), "precondition"); assert(from_obj->is_forwarded(), "precondition"); assert(from_obj->forwardee() == to_obj, "precondition"); assert(to_obj->is_objArray(), "precondition"); @@ -401,22 +396,22 @@ G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const r } void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr, - oop const old, size_t word_sz, uint age, + Klass* klass, size_t word_sz, uint age, HeapWord * const obj_ptr, uint node_index) const { PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index); if (alloc_buf->contains(obj_ptr)) { - _g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age, + 
_g1h->gc_tracer_stw()->report_promotion_in_new_plab_event(klass, word_sz * HeapWordSize, age, dest_attr.type() == G1HeapRegionAttr::Old, alloc_buf->word_sz() * HeapWordSize); } else { - _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age, + _g1h->gc_tracer_stw()->report_promotion_outside_plab_event(klass, word_sz * HeapWordSize, age, dest_attr.type() == G1HeapRegionAttr::Old); } } NOINLINE HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, - oop old, + Klass* klass, size_t word_sz, uint age, uint node_index) { @@ -439,7 +434,7 @@ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr, update_numa_stats(node_index); if (_g1h->gc_tracer_stw()->should_report_promotion_events()) { // The events are checked individually as part of the actual commit - report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index); + report_promotion_event(*dest_attr, klass, word_sz, age, obj_ptr, node_index); } } return obj_ptr; @@ -474,9 +469,17 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str()); - // Get the klass once. We'll need it again later, and this avoids - // re-decoding when it's compressed. - Klass* klass = old->klass(); + // NOTE: With compact headers, it is not safe to load the Klass* from old, because + // that would access the mark-word, that might change at any time by concurrent + // workers. + // This mark word would refer to a forwardee, which may not yet have completed + // copying. Therefore we must load the Klass* from the mark-word that we already + // loaded. This is safe, because we only enter here if not yet forwarded. + assert(!old_mark.is_forwarded(), "precondition"); + Klass* klass = UseCompactObjectHeaders + ? old_mark.klass() + : old->klass(); + const size_t word_sz = old->size_given_klass(klass); // JNI only allows pinning of typeArrays, so we only need to keep those in place. @@ -494,7 +497,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio // PLAB allocations should succeed most of the time, so we'll // normally check against null once and that's it. if (obj_ptr == nullptr) { - obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index); + obj_ptr = allocate_copy_slow(&dest_attr, klass, word_sz, age, node_index); if (obj_ptr == nullptr) { // This will either forward-to-self, or detect that someone else has // installed a forwarding pointer. @@ -595,7 +598,6 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) if (_states[worker_id] == nullptr) { _states[worker_id] = new G1ParScanThreadState(_g1h, rdcqs(), - _preserved_marks_set.get(worker_id), worker_id, _num_workers, _collection_set, @@ -655,7 +657,7 @@ NOINLINE oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) { assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old)); - oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed); + oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed); if (forward_ptr == nullptr) { // Forward-to-self succeeded. We are the "owner" of the object. G1HeapRegion* r = _g1h->heap_region_containing(old); @@ -668,8 +670,6 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz // evacuation failure recovery. 
_g1h->mark_evac_failure_object(_worker_id, old, word_sz); - _preserved_marks->push_if_necessary(old, m); - ContinuationGCSupport::transform_stack_chunk(old); _evacuation_failed_info.register_copy_failure(word_sz); @@ -727,7 +727,6 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, _g1h(g1h), _collection_set(collection_set), _rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()), - _preserved_marks_set(true /* in_c_heap */), _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)), _rdc_buffers(NEW_C_HEAP_ARRAY(BufferNodeList, num_workers, mtGC)), _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)), @@ -736,7 +735,6 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, _evac_failure_regions(evac_failure_regions), _partial_array_state_allocator(num_workers) { - _preserved_marks_set.init(num_workers); for (uint i = 0; i < num_workers; ++i) { _states[i] = nullptr; _rdc_buffers[i] = BufferNodeList(); @@ -749,5 +747,4 @@ G1ParScanThreadStateSet::~G1ParScanThreadStateSet() { FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states); FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total); FREE_C_HEAP_ARRAY(BufferNodeList, _rdc_buffers); - _preserved_marks_set.reclaim(); } diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp index 3f7fefd8a07..f61f993f028 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp @@ -34,7 +34,6 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/partialArrayState.hpp" #include "gc/shared/partialArrayTaskStepper.hpp" -#include "gc/shared/preservedMarks.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/taskqueue.hpp" #include "memory/allocation.hpp" @@ -48,8 +47,6 @@ class G1EvacuationRootClosures; class G1OopStarChunkedList; class G1PLABAllocator; class G1HeapRegion; -class PreservedMarks; -class PreservedMarksSet; class outputStream; class G1ParScanThreadState : public CHeapObj { @@ -106,7 +103,6 @@ class G1ParScanThreadState : public CHeapObj { // Per-thread evacuation failure data structures. 
ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;) - PreservedMarks* _preserved_marks; EvacuationFailedInfo _evacuation_failed_info; G1EvacFailureRegions* _evac_failure_regions; // Number of additional cards into evacuation failed regions enqueued into @@ -125,7 +121,6 @@ class G1ParScanThreadState : public CHeapObj { public: G1ParScanThreadState(G1CollectedHeap* g1h, G1RedirtyCardsQueueSet* rdcqs, - PreservedMarks* preserved_marks, uint worker_id, uint num_workers, G1CollectionSet* collection_set, @@ -174,7 +169,7 @@ private: void start_partial_objarray(G1HeapRegionAttr dest_dir, oop from, oop to); HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr, - oop old, + Klass* klass, size_t word_sz, uint age, uint node_index); @@ -209,7 +204,7 @@ private: inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age); void report_promotion_event(G1HeapRegionAttr const dest_attr, - oop const old, size_t word_sz, uint age, + Klass* klass, size_t word_sz, uint age, HeapWord * const obj_ptr, uint node_index) const; void trim_queue_to_threshold(uint threshold); @@ -246,7 +241,6 @@ class G1ParScanThreadStateSet : public StackObj { G1CollectedHeap* _g1h; G1CollectionSet* _collection_set; G1RedirtyCardsQueueSet _rdcqs; - PreservedMarksSet _preserved_marks_set; G1ParScanThreadState** _states; BufferNodeList* _rdc_buffers; size_t* _surviving_young_words_total; @@ -264,7 +258,6 @@ class G1ParScanThreadStateSet : public StackObj { G1RedirtyCardsQueueSet* rdcqs() { return &_rdcqs; } BufferNodeList* rdc_buffers() { return _rdc_buffers; } - PreservedMarksSet* preserved_marks_set() { return &_preserved_marks_set; } void flush_stats(); void record_unused_optional_region(G1HeapRegion* hr); diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index f2fe93015c5..f3590aa2ff6 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -53,7 +53,6 @@ #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gc_globals.hpp" -#include "gc/shared/preservedMarks.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/weakProcessor.inline.hpp" #include "gc/shared/workerPolicy.hpp" diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index a0e9a9b1569..1d76a44f8f8 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -42,7 +42,6 @@ #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1YoungGCPostEvacuateTasks.hpp" #include "gc/shared/bufferNode.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "jfr/jfrEvents.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" @@ -251,8 +250,8 @@ class G1PostEvacuateCollectionSetCleanupTask1::RestoreEvacFailureRegionsTask : p { // Process marked object. 
- assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded"); - obj->init_mark(); + assert(obj->is_self_forwarded(), "must be self-forwarded"); + obj->unset_self_forwarded(); hr->update_bot_for_block(obj_addr, obj_end_addr); // Statistics @@ -477,27 +476,6 @@ public: } }; -class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : public G1AbstractSubTask { - PreservedMarksSet* _preserved_marks; - WorkerTask* _task; - -public: - RestorePreservedMarksTask(PreservedMarksSet* preserved_marks) : - G1AbstractSubTask(G1GCPhaseTimes::RestorePreservedMarks), - _preserved_marks(preserved_marks), - _task(preserved_marks->create_task()) { } - - virtual ~RestorePreservedMarksTask() { - delete _task; - } - - double worker_cost() const override { - return _preserved_marks->num(); - } - - void do_work(uint worker_id) override { _task->work(worker_id); } -}; - class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure { size_t _num_dirtied; G1CollectedHeap* _g1h; @@ -979,7 +957,6 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2 } if (evac_failure_regions->has_regions_evac_failed()) { - add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set())); add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions)); } add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions, diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp index 868ab788b53..96eeaf27de1 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,6 @@ public: // - Update Derived Pointers (s) // - Clear Retained Region Data (on evacuation failure) // - Redirty Logged Cards -// - Restore Preserved Marks (on evacuation failure) // - Free Collection Set // - Resize TLABs class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask { @@ -67,7 +66,6 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask { class ProcessEvacuationFailedRegionsTask; class RedirtyLoggedCardsTask; - class RestorePreservedMarksTask; class FreeCollectionSetTask; class ResizeTLABsTask; diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index 01d4e6bb04d..74801f4870b 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -218,15 +218,16 @@ void MutableSpace::object_iterate(ObjectClosure* cl) { // When promotion-failure occurs during Young GC, eden/from space is not cleared, // so we can encounter objects with "forwarded" markword. // They are essentially dead, so skipping them - if (!obj->is_forwarded()) { + if (obj->is_forwarded()) { + assert(!obj->is_self_forwarded(), "must not be self-forwarded"); + // It is safe to use the forwardee here. Parallel GC only uses + // header-based forwarding during promotion. Full GC doesn't + // use the object header for forwarding at all. 
+ p += obj->forwardee()->size(); + } else { cl->do_object(obj); + p += obj->size(); } -#ifdef ASSERT - else { - assert(obj->forwardee() != obj, "must not be self-forwarded"); - } -#endif - p += obj->size(); } } diff --git a/src/hotspot/share/gc/parallel/parallelArguments.cpp b/src/hotspot/share/gc/parallel/parallelArguments.cpp index 313716752d5..4035259e6d6 100644 --- a/src/hotspot/share/gc/parallel/parallelArguments.cpp +++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,6 +27,7 @@ #include "gc/parallel/parallelArguments.hpp" #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/shared/adaptiveSizePolicy.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/genArguments.hpp" #include "gc/shared/workerPolicy.hpp" @@ -127,6 +128,7 @@ void ParallelArguments::initialize_heap_flags_and_sizes() { // Redo everything from the start initialize_heap_flags_and_sizes_one_pass(); } + FullGCForwarding::initialize_flags(heap_reserved_size_bytes()); } size_t ParallelArguments::heap_reserved_size_bytes() { diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 5883b1cd607..52d031b6dd2 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -33,6 +33,7 @@ #include "gc/parallel/psPromotionManager.hpp" #include "gc/parallel/psScavenge.hpp" #include "gc/parallel/psVMOperations.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcWhen.hpp" @@ -129,6 +130,8 @@ jint ParallelScavengeHeap::initialize() { ParallelInitLogger::print(); + FullGCForwarding::initialize(heap_rs.region()); + return JNI_OK; } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index f6ef195abfc..8321c3efb9c 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -44,6 +44,7 @@ #include "gc/parallel/psStringDedup.hpp" #include "gc/parallel/psYoungGen.hpp" #include "gc/shared/classUnloadingContext.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" @@ -773,6 +774,8 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id) { // // The size of the filler (min-obj-size) is 2 heap words with the default // MinObjAlignment, since both markword and klass take 1 heap word. + // With +UseCompactObjectHeaders, the minimum filler size is only one word, + // because the Klass* gets encoded in the mark-word. // // The size of the gap (if any) right before dense-prefix-end is // MinObjAlignment. @@ -780,16 +783,11 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id) { // Need to fill in the gap only if it's smaller than min-obj-size, and the // filler obj will extend to next region. - // Note: If min-fill-size decreases to 1, this whole method becomes redundant. 
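(Back-of-the-envelope check of the filler sizes mentioned above, assuming 8-byte heap words on 64-bit: with the legacy layout the mark word and the compressed Klass* plus gap each occupy one heap word, so the smallest filler object is 16 bytes, i.e. min_fill_size() == 2; with +UseCompactObjectHeaders the Klass* is encoded inside the mark word, the header shrinks to a single 8-byte word, and a 1-word filler becomes possible, which is why the early-return condition below is now expressed against min_fill_size() instead of being hard-coded.)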
- assert(CollectedHeap::min_fill_size() >= 2, "inv"); -#ifndef _LP64 - // In 32-bit system, each heap word is 4 bytes, so MinObjAlignment == 2. - // The gap is always equal to min-fill-size, so nothing to do. - return; -#endif - if (MinObjAlignment > 1) { + if (MinObjAlignment >= checked_cast(CollectedHeap::min_fill_size())) { return; } + + assert(!UseCompactObjectHeaders, "Compact headers can allocate small objects"); assert(CollectedHeap::min_fill_size() == 2, "inv"); HeapWord* const dense_prefix_end = dense_prefix(id); assert(_summary_data.is_region_aligned(dense_prefix_end), "precondition"); @@ -1593,7 +1591,7 @@ void PSParallelCompact::forward_to_new_addr() { oop obj = cast_to_oop(cur_addr); if (new_addr != cur_addr) { cm->preserved_marks()->push_if_necessary(obj, obj->mark()); - obj->forward_to(cast_to_oop(new_addr)); + FullGCForwarding::forward_to(obj, cast_to_oop(new_addr)); } size_t obj_size = obj->size(); live_words += obj_size; @@ -1636,7 +1634,7 @@ void PSParallelCompact::verify_forward() { } oop obj = cast_to_oop(cur_addr); if (cur_addr != bump_ptr) { - assert(obj->forwardee() == cast_to_oop(bump_ptr), "inv"); + assert(FullGCForwarding::forwardee(obj) == cast_to_oop(bump_ptr), "inv"); } bump_ptr += obj->size(); cur_addr += obj->size(); @@ -2395,8 +2393,8 @@ void MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) { if (copy_destination() != source()) { DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());) assert(source() != destination(), "inv"); - assert(cast_to_oop(source())->is_forwarded(), "inv"); - assert(cast_to_oop(source())->forwardee() == cast_to_oop(destination()), "inv"); + assert(FullGCForwarding::is_forwarded(cast_to_oop(source())), "inv"); + assert(FullGCForwarding::forwardee(cast_to_oop(source())) == cast_to_oop(destination()), "inv"); Copy::aligned_conjoint_words(source(), copy_destination(), words); cast_to_oop(copy_destination())->init_mark(); } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp index e5f1b9d30ae..aa62d474a55 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "gc/parallel/parMarkBitMap.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/klass.hpp" @@ -79,7 +80,7 @@ inline void PSParallelCompact::adjust_pointer(T* p) { if (!obj->is_forwarded()) { return; } - oop new_obj = obj->forwardee(); + oop new_obj = FullGCForwarding::forwardee(obj); assert(new_obj != nullptr, "non-null address for live objects"); assert(new_obj != obj, "inv"); assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj), diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.cpp b/src/hotspot/share/gc/parallel/psPromotionManager.cpp index 5f6629e6cc2..c740b1488d7 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp @@ -321,7 +321,6 @@ void PSPromotionManager::process_array_chunk(PartialArrayState* state) { } void PSPromotionManager::push_objArray(oop old_obj, oop new_obj) { - assert(old_obj->is_objArray(), "precondition"); assert(old_obj->is_forwarded(), "precondition"); assert(old_obj->forwardee() == new_obj, "precondition"); assert(new_obj->is_objArray(), "precondition"); @@ -357,7 +356,7 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markWord obj_mark) { // this started. If it is the same (i.e., no forwarding // pointer has been installed), then this thread owns // it. - if (obj->forward_to_atomic(obj, obj_mark) == nullptr) { + if (obj->forward_to_self_atomic(obj_mark) == nullptr) { // We won any races, we "own" this object. assert(obj == obj->forwardee(), "Sanity"); diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.hpp index 8d412234614..a69d975956d 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp @@ -111,7 +111,7 @@ class PSPromotionManager { void push_depth(ScannerTask task); - inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size, + inline void promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size, uint age, bool tenured, const PSPromotionLAB* lab); diff --git a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp index f38cbaecf43..ed517c06a40 100644 --- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp @@ -66,7 +66,7 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) { } } -inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, +inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass, size_t obj_size, uint age, bool tenured, const PSPromotionLAB* lab) { @@ -79,14 +79,14 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj, if (gc_tracer->should_report_promotion_in_new_plab_event()) { size_t obj_bytes = obj_size * HeapWordSize; size_t lab_size = lab->capacity(); - gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes, + gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes, age, tenured, lab_size); } } else { // Promotion of object directly to heap if (gc_tracer->should_report_promotion_outside_plab_event()) { size_t obj_bytes = obj_size * HeapWordSize; - 
gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes, + gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes, age, tenured); } } @@ -149,7 +149,7 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) { return copy_unmarked_to_survivor_space(o, m); } else { // Return the already installed forwardee. - return m.forwardee(); + return o->forwardee(m); } } @@ -165,7 +165,19 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, oop new_obj = nullptr; bool new_obj_is_tenured = false; - size_t new_obj_size = o->size(); + + // NOTE: With compact headers, it is not safe to load the Klass* from old, because + // that would access the mark-word, that might change at any time by concurrent + // workers. + // This mark word would refer to a forwardee, which may not yet have completed + // copying. Therefore we must load the Klass* from the mark-word that we already + // loaded. This is safe, because we only enter here if not yet forwarded. + assert(!test_mark.is_forwarded(), "precondition"); + Klass* klass = UseCompactObjectHeaders + ? test_mark.klass() + : o->klass(); + + size_t new_obj_size = o->size_given_klass(klass); // Find the objects age, MT safe. uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ? @@ -180,7 +192,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, if (new_obj_size > (YoungPLABSize / 2)) { // Allocate this object directly new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr); + promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr); } else { // Flush and fill _young_lab.flush(); @@ -190,7 +202,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, _young_lab.initialize(MemRegion(lab_base, YoungPLABSize)); // Try the young lab allocation again. new_obj = cast_to_oop(_young_lab.allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab); + promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab); } else { _young_gen_is_full = true; } @@ -216,7 +228,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, if (new_obj_size > (OldPLABSize / 2)) { // Allocate this object directly new_obj = cast_to_oop(old_gen()->allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr); + promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr); } else { // Flush and fill _old_lab.flush(); @@ -226,7 +238,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o, _old_lab.initialize(MemRegion(lab_base, OldPLABSize)); // Try the old lab allocation again. 
new_obj = cast_to_oop(_old_lab.allocate(new_obj_size)); - promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab); + promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab); } } } diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp index f3b3c8952b9..3792bb5a721 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.cpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp @@ -39,7 +39,6 @@ #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTraceTime.inline.hpp" -#include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/space.hpp" @@ -227,7 +226,6 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs, const char* policy) : Generation(rs, initial_size), _promotion_failed(false), - _preserved_marks_set(false /* in_c_heap */), _promo_failure_drain_in_progress(false), _string_dedup_requests() { @@ -609,8 +607,6 @@ bool DefNewGeneration::collect(bool clear_all_soft_refs) { age_table()->clear(); to()->clear(SpaceDecorator::Mangle); - // The preserved marks should be empty at the start of the GC. - _preserved_marks_set.init(1); YoungGenScanClosure young_gen_cl(this); OldGenScanClosure old_gen_cl(this); @@ -681,8 +677,6 @@ bool DefNewGeneration::collect(bool clear_all_soft_refs) { // Reset the PromotionFailureALot counters. NOT_PRODUCT(heap->reset_promotion_should_fail();) } - // We should have processed and cleared all the preserved marks. - _preserved_marks_set.reclaim(); heap->trace_heap_after_gc(_gc_tracer); @@ -706,19 +700,17 @@ void DefNewGeneration::remove_forwarding_pointers() { // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.) struct ResetForwardedMarkWord : ObjectClosure { void do_object(oop obj) override { - if (obj->is_forwarded()) { - obj->init_mark(); + if (obj->is_self_forwarded()) { + obj->unset_self_forwarded(); + } else if (obj->is_forwarded()) { + // To restore the klass-bits in the header. + // Needed for object iteration to work properly. + obj->set_mark(obj->forwardee()->prototype_mark()); } } } cl; eden()->object_iterate(&cl); from()->object_iterate(&cl); - - restore_preserved_marks(); -} - -void DefNewGeneration::restore_preserved_marks() { - _preserved_marks_set.restore(nullptr); } void DefNewGeneration::handle_promotion_failure(oop old) { @@ -726,12 +718,11 @@ void DefNewGeneration::handle_promotion_failure(oop old) { _promotion_failed = true; _promotion_failed_info.register_copy_failure(old->size()); - _preserved_marks_set.get()->push_if_necessary(old, old->mark()); ContinuationGCSupport::transform_stack_chunk(old); // forward to self - old->forward_to(old); + old->forward_to_self(); _promo_failure_scan_stack.push(old); diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp index e86ea6b9747..c5e1c2b152e 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.hpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp @@ -32,7 +32,6 @@ #include "gc/shared/copyFailedInfo.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/shared/generationCounters.hpp" -#include "gc/shared/preservedMarks.hpp" #include "gc/shared/stringdedup/stringDedup.hpp" #include "gc/shared/tlab_globals.hpp" #include "utilities/align.hpp" @@ -99,11 +98,6 @@ class DefNewGeneration: public Generation { // therefore we must remove their forwarding pointers. 
void remove_forwarding_pointers(); - virtual void restore_preserved_marks(); - - // Preserved marks - PreservedMarksSet _preserved_marks_set; - Stack _promo_failure_scan_stack; void drain_promo_failure_scan_stack(void); bool _promo_failure_drain_in_progress; diff --git a/src/hotspot/share/gc/serial/serialArguments.cpp b/src/hotspot/share/gc/serial/serialArguments.cpp index ac6dd24fdbf..f8efa192807 100644 --- a/src/hotspot/share/gc/serial/serialArguments.cpp +++ b/src/hotspot/share/gc/serial/serialArguments.cpp @@ -23,10 +23,16 @@ */ #include "precompiled.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/genArguments.hpp" #include "gc/serial/serialArguments.hpp" #include "gc/serial/serialHeap.hpp" +void SerialArguments::initialize_heap_flags_and_sizes() { + GenArguments::initialize_heap_flags_and_sizes(); + FullGCForwarding::initialize_flags(MaxHeapSize); +} + CollectedHeap* SerialArguments::create_heap() { return new SerialHeap(); } diff --git a/src/hotspot/share/gc/serial/serialArguments.hpp b/src/hotspot/share/gc/serial/serialArguments.hpp index 3ed4df5f41b..d12bd7d8e59 100644 --- a/src/hotspot/share/gc/serial/serialArguments.hpp +++ b/src/hotspot/share/gc/serial/serialArguments.hpp @@ -32,6 +32,7 @@ class CollectedHeap; class SerialArguments : public GenArguments { private: virtual CollectedHeap* create_heap(); + virtual void initialize_heap_flags_and_sizes(); }; #endif // SHARE_GC_SERIAL_SERIALARGUMENTS_HPP diff --git a/src/hotspot/share/gc/serial/serialFullGC.cpp b/src/hotspot/share/gc/serial/serialFullGC.cpp index 897437e33c9..0df28fa7bd5 100644 --- a/src/hotspot/share/gc/serial/serialFullGC.cpp +++ b/src/hotspot/share/gc/serial/serialFullGC.cpp @@ -43,6 +43,7 @@ #include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" @@ -230,7 +231,7 @@ class Compacter { static void forward_obj(oop obj, HeapWord* new_addr) { prefetch_write_scan(obj); if (cast_from_oop(obj) != new_addr) { - obj->forward_to(cast_to_oop(new_addr)); + FullGCForwarding::forward_to(obj, cast_to_oop(new_addr)); } else { assert(obj->is_gc_marked(), "inv"); // This obj will stay in-place. Fix the markword. @@ -255,7 +256,7 @@ class Compacter { prefetch_read_scan(addr); oop obj = cast_to_oop(addr); - oop new_obj = obj->forwardee(); + oop new_obj = FullGCForwarding::forwardee(obj); HeapWord* new_addr = cast_from_oop(new_obj); assert(addr != new_addr, "inv"); prefetch_write_copy(new_addr); @@ -352,13 +353,13 @@ public: HeapWord* top = space->top(); // Check if the first obj inside this space is forwarded. - if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) { // Jump over consecutive (in-place) live-objs-chunk cur_addr = get_first_dead(i); } while (cur_addr < top) { - if (!cast_to_oop(cur_addr)->is_forwarded()) { + if (!FullGCForwarding::is_forwarded(cast_to_oop(cur_addr))) { cur_addr = *(HeapWord**) cur_addr; continue; } @@ -593,7 +594,7 @@ void SerialFullGC::mark_object(oop obj) { // some marks may contain information we need to preserve so we store them away // and overwrite the mark. We'll restore it at the end of serial full GC. 
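(Illustrative sketch, not patch code: with compact headers the mark word also carries the narrow Klass*, which is why marking below re-derives the mark from obj->prototype_mark() instead of the klass-less markWord::prototype(). The constants are assumptions that mirror markWord::klass_shift and the lock-bit patterns referenced elsewhere in this patch.)

    #include <cstdint>
    #include <cassert>

    constexpr int      KLASS_SHIFT  = 42;    // assumed value of markWord::klass_shift
    constexpr uint64_t MARKED_VALUE = 0x3;   // lock bits meaning "marked"/"forwarded"

    // Toy prototype mark: narrow Klass* in the upper bits, "unlocked" lock bits below.
    uint64_t prototype_mark(uint64_t narrow_klass) {
      return (narrow_klass << KLASS_SHIFT) | 0x1;
    }

    // Marking only touches the low lock bits, so the Klass* bits survive.
    uint64_t set_marked(uint64_t mark) {
      return (mark & ~uint64_t(0x3)) | MARKED_VALUE;
    }

    int main() {
      uint64_t m = set_marked(prototype_mark(0x1234));
      assert((m >> KLASS_SHIFT) == 0x1234);  // klass information preserved across marking
      assert((m & 0x3) == MARKED_VALUE);
      return 0;
    }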
markWord mark = obj->mark(); - obj->set_mark(markWord::prototype().set_marked()); + obj->set_mark(obj->prototype_mark().set_marked()); ContinuationGCSupport::transform_stack_chunk(obj); @@ -624,8 +625,8 @@ template void SerialFullGC::adjust_pointer(T* p) { oop obj = CompressedOops::decode_not_null(heap_oop); assert(Universe::heap()->is_in(obj), "should be in heap"); - if (obj->is_forwarded()) { - oop new_obj = obj->forwardee(); + if (FullGCForwarding::is_forwarded(obj)) { + oop new_obj = FullGCForwarding::forwardee(obj); assert(is_object_aligned(new_obj), "oop must be aligned"); RawAccess::oop_store(p, new_obj); } diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index 9dcfb5b6092..f5e6a46df29 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -40,6 +40,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectorCounters.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcInitLogger.hpp" #include "gc/shared/gcLocker.inline.hpp" @@ -200,6 +201,8 @@ jint SerialHeap::initialize() { GCInitLogger::print(); + FullGCForwarding::initialize(_reserved); + return JNI_OK; } diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp index 643a7936b9b..b3a3c01240d 100644 --- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -710,11 +710,12 @@ int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) { int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() : instanceOopDesc::base_offset_in_bytes(); // base_off: - // 8 - 32-bit VM + // 8 - 32-bit VM or 64-bit VM, compact headers // 12 - 64-bit VM, compressed klass // 16 - 64-bit VM, normal klass if (base_off % BytesPerLong != 0) { assert(UseCompressedClassPointers, ""); + assert(!UseCompactObjectHeaders, ""); if (is_array) { // Exclude length to copy by 8 bytes words. base_off += sizeof(int); diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 82eaaf9a396..3f0447b6558 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -220,6 +220,26 @@ bool CollectedHeap::supports_concurrent_gc_breakpoints() const { return false; } +static bool klass_is_sane(oop object) { + if (UseCompactObjectHeaders) { + // With compact headers, we can't safely access the Klass* when + // the object has been forwarded, because non-full-GC-forwarding + // temporarily overwrites the mark-word, and thus the Klass*, with + // the forwarding pointer, and here we have no way to make a + // distinction between Full-GC and regular GC forwarding. + markWord mark = object->mark(); + if (mark.is_forwarded()) { + // We can't access the Klass*. We optimistically assume that + // it is ok. This happens very rarely. 
+ return true; + } + + return Metaspace::contains(mark.klass_without_asserts()); + } + + return Metaspace::contains(object->klass_without_asserts()); +} + bool CollectedHeap::is_oop(oop object) const { if (!is_object_aligned(object)) { return false; @@ -229,7 +249,7 @@ bool CollectedHeap::is_oop(oop object) const { return false; } - if (!Metaspace::contains(object->klass_without_asserts())) { + if (!klass_is_sane(object)) { return false; } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index b413e3dfb43..036bc0230c8 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -306,7 +306,7 @@ protected: } virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap); - static constexpr size_t min_dummy_object_size() { + static size_t min_dummy_object_size() { return oopDesc::header_size(); } diff --git a/src/hotspot/share/gc/shared/fullGCForwarding.cpp b/src/hotspot/share/gc/shared/fullGCForwarding.cpp new file mode 100644 index 00000000000..4880b08887e --- /dev/null +++ b/src/hotspot/share/gc/shared/fullGCForwarding.cpp @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/fullGCForwarding.hpp" +#include "memory/memRegion.hpp" +#include "runtime/globals_extension.hpp" + +HeapWord* FullGCForwarding::_heap_base = nullptr; +int FullGCForwarding::_num_low_bits = 0; + +void FullGCForwarding::initialize_flags(size_t max_heap_size) { +#ifdef _LP64 + size_t max_narrow_heap_size = right_n_bits(NumLowBitsNarrow - Shift); + if (UseCompactObjectHeaders && max_heap_size > max_narrow_heap_size * HeapWordSize) { + warning("Compact object headers require a java heap size smaller than " SIZE_FORMAT + "%s (given: " SIZE_FORMAT "%s). 
Disabling compact object headers.", + byte_size_in_proper_unit(max_narrow_heap_size * HeapWordSize), + proper_unit_for_byte_size(max_narrow_heap_size * HeapWordSize), + byte_size_in_proper_unit(max_heap_size), + proper_unit_for_byte_size(max_heap_size)); + FLAG_SET_ERGO(UseCompactObjectHeaders, false); + } +#endif +} + +void FullGCForwarding::initialize(MemRegion heap) { +#ifdef _LP64 + _heap_base = heap.start(); + if (UseCompactObjectHeaders) { + _num_low_bits = NumLowBitsNarrow; + } else { + _num_low_bits = NumLowBitsWide; + } +#endif +} diff --git a/src/hotspot/share/gc/shared/fullGCForwarding.hpp b/src/hotspot/share/gc/shared/fullGCForwarding.hpp new file mode 100644 index 00000000000..a6ca182428b --- /dev/null +++ b/src/hotspot/share/gc/shared/fullGCForwarding.hpp @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_SHARED_FULLGCFORWARDING_HPP +#define SHARE_GC_SHARED_FULLGCFORWARDING_HPP + +#include "memory/allStatic.hpp" +#include "memory/memRegion.hpp" +#include "oops/markWord.hpp" +#include "oops/oopsHierarchy.hpp" + +/* + * Implements forwarding for the Full GCs of Serial, Parallel, G1 and Shenandoah in + * a way that preserves upper N bits of object mark-words, which contain crucial + * Klass* information when running with compact headers. The encoding is similar to + * compressed-oops encoding: it basically subtracts the forwardee address from the + * heap-base, shifts that difference into the right place, and sets the lowest two + * bits (to indicate 'forwarded' state as usual). + * With compact-headers, we have 40 bits to encode forwarding pointers. This is + * enough to address 8TB of heap. If the heap size exceeds that limit, we turn off + * compact headers. 
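(Worked example of the limit stated above, assuming markWord::klass_shift == 42, which is consistent with the 40-bit figure in this comment: 42 usable low mark-word bits minus the 2 lock bits leave 40 bits for a word-scaled offset.)

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int num_low_bits = 42;                                   // NumLowBitsNarrow
      const int shift        = 2;                                    // lock_bits + lock_shift
      const uint64_t addressable_words = uint64_t(1) << (num_low_bits - shift); // 2^40 heap words
      const uint64_t addressable_bytes = addressable_words * 8;      // 8-byte heap words
      printf("narrow encoding covers %llu TiB of heap\n",
             (unsigned long long)(addressable_bytes >> 40));         // prints 8
      return 0;
    }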
+ */ +class FullGCForwarding : public AllStatic { + static const int NumLowBitsNarrow = LP64_ONLY(markWord::klass_shift) NOT_LP64(0 /*unused*/); + static const int NumLowBitsWide = BitsPerWord; + static const int Shift = markWord::lock_bits + markWord::lock_shift; + + static HeapWord* _heap_base; + static int _num_low_bits; +public: + static void initialize_flags(size_t max_heap_size); + static void initialize(MemRegion heap); + static inline void forward_to(oop from, oop to); + static inline oop forwardee(oop from); + static inline bool is_forwarded(oop obj); +}; + +#endif // SHARE_GC_SHARED_FULLGCFORWARDING_HPP diff --git a/src/hotspot/share/gc/shared/fullGCForwarding.inline.hpp b/src/hotspot/share/gc/shared/fullGCForwarding.inline.hpp new file mode 100644 index 00000000000..ebd280a454f --- /dev/null +++ b/src/hotspot/share/gc/shared/fullGCForwarding.inline.hpp @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef GC_SHARED_FULLGCFORWARDING_INLINE_HPP +#define GC_SHARED_FULLGCFORWARDING_INLINE_HPP + +#include "gc/shared/fullGCForwarding.hpp" + +#include "oops/oop.inline.hpp" +#include "utilities/globalDefinitions.hpp" + +void FullGCForwarding::forward_to(oop from, oop to) { +#ifdef _LP64 + uintptr_t encoded = pointer_delta(cast_from_oop(to), _heap_base) << Shift; + assert(encoded <= static_cast(right_n_bits(_num_low_bits)), "encoded forwardee must fit"); + uintptr_t mark = from->mark().value(); + mark &= ~right_n_bits(_num_low_bits); + mark |= (encoded | markWord::marked_value); + from->set_mark(markWord(mark)); +#else + from->forward_to(to); +#endif +} + +oop FullGCForwarding::forwardee(oop from) { +#ifdef _LP64 + uintptr_t mark = from->mark().value(); + HeapWord* decoded = _heap_base + ((mark & right_n_bits(_num_low_bits)) >> Shift); + return cast_to_oop(decoded); +#else + return from->forwardee(); +#endif +} + +bool FullGCForwarding::is_forwarded(oop obj) { + return obj->mark().is_forwarded(); +} + +#endif // GC_SHARED_FULLGCFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp index 318ab00188b..f96ec50e3b0 100644 --- a/src/hotspot/share/gc/shared/memAllocator.cpp +++ b/src/hotspot/share/gc/shared/memAllocator.cpp @@ -361,18 +361,23 @@ void MemAllocator::mem_clear(HeapWord* mem) const { assert(mem != nullptr, "cannot initialize null object"); const size_t hs = oopDesc::header_size(); assert(_word_size >= hs, "unexpected object size"); - oopDesc::set_klass_gap(mem, 0); + if (oopDesc::has_klass_gap()) { + oopDesc::set_klass_gap(mem, 0); + } Copy::fill_to_aligned_words(mem + hs, _word_size - hs); } oop MemAllocator::finish(HeapWord* mem) const { assert(mem != nullptr, "null object pointer"); - // May be bootstrapping - oopDesc::set_mark(mem, markWord::prototype()); // Need a release store to ensure array/class length, mark word, and // object zeroing are visible before setting the klass non-null, for // concurrent collectors. - oopDesc::release_set_klass(mem, _klass); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header()); + } else { + oopDesc::set_mark(mem, markWord::prototype()); + oopDesc::release_set_klass(mem, _klass); + } return cast_to_oop(mem); } diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp index 9889dbc3690..4daba541d29 100644 --- a/src/hotspot/share/gc/shared/preservedMarks.cpp +++ b/src/hotspot/share/gc/shared/preservedMarks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/workerThread.hpp" #include "gc/shared/workerUtils.hpp" @@ -42,8 +43,8 @@ void PreservedMarks::restore() { void PreservedMarks::adjust_preserved_mark(PreservedMark* elem) { oop obj = elem->get_oop(); - if (obj->is_forwarded()) { - elem->set_oop(obj->forwardee()); + if (FullGCForwarding::is_forwarded(obj)) { + elem->set_oop(FullGCForwarding::forwardee(obj)); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp index bcc370eeb31..ca95cde83b1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/shared/workerPolicy.hpp" @@ -198,6 +199,11 @@ void ShenandoahArguments::initialize_alignments() { HeapAlignment = align; } +void ShenandoahArguments::initialize_heap_flags_and_sizes() { + GCArguments::initialize_heap_flags_and_sizes(); + FullGCForwarding::initialize_flags(MaxHeapSize); +} + CollectedHeap* ShenandoahArguments::create_heap() { return new ShenandoahHeap(new ShenandoahCollectorPolicy()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp index bc73d9a2d12..ad54b1d235c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.hpp @@ -35,6 +35,7 @@ private: virtual void initialize(); virtual size_t conservative_max_heap_alignment(); + virtual void initialize_heap_flags_and_sizes(); virtual CollectedHeap* create_heap(); }; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp index 0bbced2e8d3..22822ad3fec 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -197,7 +197,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, line); } - Klass* obj_klass = obj->klass_or_null(); + Klass* obj_klass = ShenandoahForwarding::klass(obj); if (obj_klass == nullptr) { print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", "Object klass pointer should not be null", @@ -235,7 +235,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, line); } - if (obj_klass != fwd->klass()) { + if (obj_klass != ShenandoahForwarding::klass(fwd)) { print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed", "Forwardee klass disagrees with object class", file, line); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp index 95152017a69..fb57c55e09a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.hpp @@ -62,6 +62,8 @@ public: */ static inline oop try_update_forwardee(oop obj, oop update); + static inline size_t size(oop obj); + static inline Klass* klass(oop obj); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp index 01294f9c890..ccdbb81f33b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp @@ -90,4 +90,21 @@ inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) { } } +inline Klass* ShenandoahForwarding::klass(oop obj) { + if (UseCompactObjectHeaders) { + markWord mark = obj->mark(); + if (mark.is_marked()) { + oop fwd = cast_to_oop(mark.clear_lock_bits().to_pointer()); + mark = fwd->mark(); + } + return mark.klass(); + } else { + return obj->klass(); + } +} + +inline size_t ShenandoahForwarding::size(oop obj) { + return obj->size_given_klass(klass(obj)); +} + #endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index c2d94353d54..53cb8e5d20f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -26,6 +26,7 @@ #include "compiler/oopMap.hpp" #include "gc/shared/continuationGCSupport.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/tlab_globals.hpp" @@ -369,7 +370,7 @@ public: shenandoah_assert_not_forwarded(nullptr, p); if (_compact_point != cast_from_oop(p)) { _preserved_marks->push_if_necessary(p, p->mark()); - p->forward_to(cast_to_oop(_compact_point)); + FullGCForwarding::forward_to(p, cast_to_oop(_compact_point)); } _compact_point += obj_size; } @@ -492,7 +493,7 @@ void ShenandoahFullGC::calculate_target_humongous_objects() { if (start >= to_begin && start != r->index()) { // Fits into current window, and the move is non-trivial. Record the move then, and continue scan. 
_preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark()); - old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom())); + FullGCForwarding::forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom())); to_end = start; continue; } @@ -752,8 +753,8 @@ private: if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); assert(_ctx->is_marked(obj), "must be marked"); - if (obj->is_forwarded()) { - oop forw = obj->forwardee(); + if (FullGCForwarding::is_forwarded(obj)) { + oop forw = FullGCForwarding::forwardee(obj); RawAccess::oop_store(p, forw); } } @@ -863,9 +864,9 @@ public: void do_object(oop p) { assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); size_t size = p->size(); - if (p->is_forwarded()) { + if (FullGCForwarding::is_forwarded(p)) { HeapWord* compact_from = cast_from_oop(p); - HeapWord* compact_to = cast_from_oop(p->forwardee()); + HeapWord* compact_to = cast_from_oop(FullGCForwarding::forwardee(p)); assert(compact_from != compact_to, "Forwarded object should move"); Copy::aligned_conjoint_words(compact_from, compact_to, size); oop new_obj = cast_to_oop(compact_to); @@ -970,7 +971,7 @@ void ShenandoahFullGC::compact_humongous_objects() { ShenandoahHeapRegion* r = heap->get_region(c - 1); if (r->is_humongous_start()) { oop old_obj = cast_to_oop(r->bottom()); - if (!old_obj->is_forwarded()) { + if (!FullGCForwarding::is_forwarded(old_obj)) { // No need to move the object, it stays at the same slot continue; } @@ -979,7 +980,7 @@ void ShenandoahFullGC::compact_humongous_objects() { size_t old_start = r->index(); size_t old_end = old_start + num_regions - 1; - size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); + size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj)); size_t new_end = new_start + num_regions - 1; assert(old_start != new_start, "must be real move"); assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index()); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 07914947ead..0f7139691a3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -28,6 +28,7 @@ #include "memory/universe.hpp" #include "gc/shared/classUnloadingContext.hpp" +#include "gc/shared/fullGCForwarding.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTraceTime.inline.hpp" @@ -421,6 +422,8 @@ jint ShenandoahHeap::initialize() { ShenandoahInitLogger::print(); + FullGCForwarding::initialize(_heap_region); + return JNI_OK; } @@ -1129,7 +1132,7 @@ oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope"); - size_t size = p->size(); + size_t size = ShenandoahForwarding::size(p); assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index cd2cc2eb209..6131d079590 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -434,7 +434,7 @@ inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, oop obj = cast_to_oop(cs); assert(oopDesc::is_oop(obj), "sanity"); assert(ctx->is_marked(obj), "object expected 
to be marked"); - size_t size = obj->size(); + size_t size = ShenandoahForwarding::size(obj); cl->do_object(obj); cs += size; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 51878772889..b653a06b877 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -102,7 +102,7 @@ private: T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); - if (is_instance_ref_klass(obj->klass())) { + if (is_instance_ref_klass(ShenandoahForwarding::klass(obj))) { obj = ShenandoahForwarding::get_forwardee(obj); } // Single threaded verification can use faster non-atomic stack and bitmap @@ -129,7 +129,7 @@ private: "oop must be aligned"); ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj); - Klass* obj_klass = obj->klass_or_null(); + Klass* obj_klass = ShenandoahForwarding::klass(obj); // Verify that obj is not in dead space: { @@ -144,11 +144,11 @@ private: "Object start should be within the region"); if (!obj_reg->is_humongous()) { - check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(), + check(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + ShenandoahForwarding::size(obj)) <= obj_reg->top(), "Object end should be within the region"); } else { size_t humongous_start = obj_reg->index(); - size_t humongous_end = humongous_start + (obj->size() >> ShenandoahHeapRegion::region_size_words_shift()); + size_t humongous_end = humongous_start + (ShenandoahForwarding::size(obj) >> ShenandoahHeapRegion::region_size_words_shift()); for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) { check(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(), "Humongous object is in continuation that fits it"); @@ -165,7 +165,7 @@ private: // skip break; case ShenandoahVerifier::_verify_liveness_complete: - Atomic::add(&_ld[obj_reg->index()], (uint) obj->size(), memory_order_relaxed); + Atomic::add(&_ld[obj_reg->index()], (uint) ShenandoahForwarding::size(obj), memory_order_relaxed); // fallthrough for fast failure for un-live regions: case ShenandoahVerifier::_verify_liveness_conservative: check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), @@ -209,7 +209,7 @@ private: HeapWord *fwd_addr = cast_from_oop(fwd); check(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(), "Forwardee start should be within the region"); - check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(), + check(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + ShenandoahForwarding::size(fwd)) <= fwd_reg->top(), "Forwardee end should be within the region"); oop fwd2 = ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); @@ -327,7 +327,11 @@ public: */ void verify_oops_from(oop obj) { _loc = obj; - obj->oop_iterate(this); + // oop_iterate() can not deal with forwarded objects, because + // it needs to load klass(), which may be overridden by the + // forwarding pointer. 
+ oop fwd = ShenandoahForwarding::get_forwardee_raw(obj); + fwd->oop_iterate(this); _loc = nullptr; } @@ -591,7 +595,7 @@ public: while (addr < limit) { verify_and_follow(addr, stack, cl, &processed); - addr += cast_to_oop(addr)->size(); + addr += ShenandoahForwarding::size(cast_to_oop(addr)); } } @@ -607,7 +611,7 @@ public: // Verify everything reachable from that object too, hopefully realizing // everything was already marked, and never touching further: - if (!is_instance_ref_klass(obj->klass())) { + if (!is_instance_ref_klass(ShenandoahForwarding::klass(obj))) { cl.verify_oops_from(obj); (*processed)++; } diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp index 12ee400eb2d..3f9c09f1c8c 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp @@ -445,7 +445,7 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a assert(src_offset == dest_offset, "should be equal"); const jlong offset = src_offset->get_long(); if (offset != arrayOopDesc::base_offset_in_bytes(T_OBJECT)) { - assert(!UseCompressedClassPointers, "should only happen without compressed class pointers"); + assert(!UseCompressedClassPointers || UseCompactObjectHeaders, "should only happen without compressed class pointers"); assert((arrayOopDesc::base_offset_in_bytes(T_OBJECT) - offset) == BytesPerLong, "unexpected offset"); length = phase->transform_later(new SubLNode(length, phase->longcon(1))); // Size is in longs src_offset = phase->longcon(arrayOopDesc::base_offset_in_bytes(T_OBJECT)); diff --git a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp index ada8351a9f6..c63b989dc4e 100644 --- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp @@ -63,8 +63,12 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { // Signal to the ZIterator that this is an invisible root, by setting // the mark word to "marked". Reset to prototype() after the clearing. - arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); - arrayOopDesc::release_set_klass(mem, _klass); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header().set_marked()); + } else { + arrayOopDesc::set_mark(mem, markWord::prototype().set_marked()); + arrayOopDesc::release_set_klass(mem, _klass); + } assert(_length >= 0, "length should be non-negative"); arrayOopDesc::set_length(mem, _length); @@ -152,7 +156,11 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { ZThreadLocalData::clear_invisible_root(_thread); // Signal to the ZIterator that this is no longer an invisible root - oopDesc::release_set_mark(mem, markWord::prototype()); + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(mem, _klass->prototype_header()); + } else { + oopDesc::release_set_mark(mem, markWord::prototype()); + } return cast_to_oop(mem); } diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index fbdf8f9ca71..ee50b2eb3d8 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -2021,10 +2021,13 @@ run: } // Initialize header, mirrors MemAllocator. 
- oopDesc::set_mark(result, markWord::prototype()); - oopDesc::set_klass_gap(result, 0); - oopDesc::release_set_klass(result, ik); - + if (UseCompactObjectHeaders) { + oopDesc::release_set_mark(result, ik->prototype_header()); + } else { + oopDesc::set_mark(result, markWord::prototype()); + oopDesc::set_klass_gap(result, 0); + oopDesc::release_set_klass(result, ik); + } oop obj = cast_to_oop(result); // Must prevent reordering of stores for object initialization diff --git a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp index 13b55c34e23..2eca6b5eb71 100644 --- a/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp +++ b/src/hotspot/share/jfr/leakprofiler/chains/objectSampleMarker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ class ObjectSampleMarker : public StackObj { // now we will set the mark word to "marked" in order to quickly // identify sample objects during the reachability search from gc roots. assert(!obj->mark().is_marked(), "should only mark an object once"); - obj->set_mark(markWord::prototype().set_marked()); + obj->set_mark(obj->prototype_mark().set_marked()); assert(obj->mark().is_marked(), "invariant"); } }; diff --git a/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp b/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp index e31a96153a4..55ea5225e24 100644 --- a/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp +++ b/src/hotspot/share/jfr/support/jfrObjectAllocationSample.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. +* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ inline bool send_allocation_sample(const Klass* klass, int64_t allocated_bytes, inline int64_t estimate_tlab_size_bytes(Thread* thread) { const size_t desired_tlab_size_bytes = thread->tlab().desired_size() * HeapWordSize; const size_t alignment_reserve_bytes = thread->tlab().alignment_reserve_in_bytes(); - assert(desired_tlab_size_bytes > alignment_reserve_bytes, "invariant"); + assert(desired_tlab_size_bytes >= alignment_reserve_bytes, "invariant"); return static_cast(desired_tlab_size_bytes - alignment_reserve_bytes); } @@ -66,6 +66,10 @@ static void normalize_as_tlab_and_send_allocation_samples(const Klass* klass, in return; } const int64_t tlab_size_bytes = estimate_tlab_size_bytes(thread); + if (tlab_size_bytes <= 0) { + // We don't get a TLAB, avoid endless loop below. 
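(Toy model with invented names of the loop the new guard protects: the last-sampled watermark advances in steps of the estimated TLAB size, so a non-positive estimate would never make progress, hence the early return added below.)

    #include <cstdint>
    #include <cstdio>

    int count_samples(int64_t allocated, int64_t last_sampled, int64_t step) {
      if (step <= 0) {
        return 0;                          // the added early return
      }
      int samples = 0;
      while (allocated - last_sampled >= step) {
        last_sampled += step;              // with step == 0 this would spin forever
        samples++;
      }
      return samples;
    }

    int main() {
      printf("%d samples\n", count_samples(10 * 1024, 0, 4 * 1024)); // 2 samples
      return 0;
    }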
+ return; + } if (allocated_bytes - tl->last_allocated_bytes() < tlab_size_bytes) { return; } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp index 0773de6ddba..b44b4bb9116 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp @@ -39,6 +39,9 @@ class CompilerToVM { friend class JVMCIVMStructs; private: + static int oopDesc_klass_offset_in_bytes; + static int arrayOopDesc_length_offset_in_bytes; + static int Klass_vtable_start_offset; static int Klass_vtable_length_offset; diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp index ab536a81e6e..faa23a84178 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp @@ -54,6 +54,8 @@ #include "runtime/stubRoutines.hpp" #include "utilities/resourceHash.hpp" +int CompilerToVM::Data::oopDesc_klass_offset_in_bytes; +int CompilerToVM::Data::arrayOopDesc_length_offset_in_bytes; int CompilerToVM::Data::Klass_vtable_start_offset; int CompilerToVM::Data::Klass_vtable_length_offset; @@ -147,6 +149,9 @@ int CompilerToVM::Data::data_section_item_alignment; JVMTI_ONLY( int* CompilerToVM::Data::_should_notify_object_alloc; ) void CompilerToVM::Data::initialize(JVMCI_TRAPS) { + oopDesc_klass_offset_in_bytes = oopDesc::klass_offset_in_bytes(); + arrayOopDesc_length_offset_in_bytes = arrayOopDesc::length_offset_in_bytes(); + Klass_vtable_start_offset = in_bytes(Klass::vtable_start_offset()); Klass_vtable_length_offset = in_bytes(Klass::vtable_length_offset()); diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index da50a524243..a0ffe74873b 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -56,6 +56,9 @@ #endif #define VM_STRUCTS(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field) \ + static_field(CompilerToVM::Data, oopDesc_klass_offset_in_bytes, int) \ + static_field(CompilerToVM::Data, arrayOopDesc_length_offset_in_bytes, int) \ + \ static_field(CompilerToVM::Data, Klass_vtable_start_offset, int) \ static_field(CompilerToVM::Data, Klass_vtable_length_offset, int) \ \ @@ -275,6 +278,7 @@ nonstatic_field(Klass, _secondary_supers_bitmap, uintx) \ nonstatic_field(Klass, _hash_slot, uint8_t) \ nonstatic_field(Klass, _misc_flags._flags, u1) \ + nonstatic_field(Klass, _prototype_header, markWord) \ \ nonstatic_field(LocalVariableTableElement, start_bci, u2) \ nonstatic_field(LocalVariableTableElement, length, u2) \ @@ -480,7 +484,6 @@ declare_constant(CompLevel_full_optimization) \ declare_constant(HeapWordSize) \ declare_constant(InvocationEntryBci) \ - declare_constant(LogKlassAlignmentInBytes) \ declare_constant(JVMCINMethodData::SPECULATION_LENGTH_BITS) \ \ declare_constant(JVM_ACC_WRITTEN_FLAGS) \ @@ -796,6 +799,7 @@ declare_constant(InvocationCounter::count_increment) \ declare_constant(InvocationCounter::count_shift) \ \ + declare_constant(markWord::klass_shift) \ declare_constant(markWord::hash_shift) \ declare_constant(markWord::monitor_value) \ \ diff --git a/src/hotspot/share/memory/classLoaderMetaspace.cpp b/src/hotspot/share/memory/classLoaderMetaspace.cpp index 08bf42da8e3..4bcbb862a5a 100644 --- a/src/hotspot/share/memory/classLoaderMetaspace.cpp +++ b/src/hotspot/share/memory/classLoaderMetaspace.cpp @@ -30,17 +30,23 @@ #include "memory/metaspaceUtils.hpp" #include 
"memory/metaspace/chunkManager.hpp" #include "memory/metaspace/internalStats.hpp" +#include "memory/metaspace/metablock.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" +#include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspaceTracer.hpp" +#include "oops/klass.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" using metaspace::ChunkManager; +using metaspace::MetaBlock; using metaspace::MetaspaceArena; +using metaspace::MetaspaceContext; using metaspace::ArenaGrowthPolicy; using metaspace::RunningCounters; using metaspace::InternalStats; @@ -49,30 +55,35 @@ using metaspace::InternalStats; #define LOGFMT_ARGS p2i(this) ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type) : + ClassLoaderMetaspace(lock, space_type, + MetaspaceContext::context_nonclass(), + MetaspaceContext::context_class(), + CompressedKlassPointers::klass_alignment_in_words()) +{} + +ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type, + MetaspaceContext* non_class_context, + MetaspaceContext* class_context, + size_t klass_alignment_words) : _lock(lock), _space_type(space_type), _non_class_space_arena(nullptr), _class_space_arena(nullptr) { - ChunkManager* const non_class_cm = - ChunkManager::chunkmanager_nonclass(); - // Initialize non-class Arena _non_class_space_arena = new MetaspaceArena( - non_class_cm, + non_class_context, ArenaGrowthPolicy::policy_for_space_type(space_type, false), - RunningCounters::used_nonclass_counter(), - "non-class sm"); + Metaspace::min_allocation_alignment_words, + "non-class arena"); // If needed, initialize class arena - if (Metaspace::using_class_space()) { - ChunkManager* const class_cm = - ChunkManager::chunkmanager_class(); + if (class_context != nullptr) { _class_space_arena = new MetaspaceArena( - class_cm, + class_context, ArenaGrowthPolicy::policy_for_space_type(space_type, true), - RunningCounters::used_class_counter(), - "class sm"); + klass_alignment_words, + "class arena"); } UL2(debug, "born (nonclass arena: " PTR_FORMAT ", class arena: " PTR_FORMAT ".", @@ -89,12 +100,28 @@ ClassLoaderMetaspace::~ClassLoaderMetaspace() { // Allocate word_size words from Metaspace. MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdType) { + word_size = align_up(word_size, Metaspace::min_allocation_word_size); MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag); - if (Metaspace::is_class_space_allocation(mdType)) { - return class_space_arena()->allocate(word_size); + MetaBlock result, wastage; + const bool is_class = have_class_space_arena() && mdType == Metaspace::ClassType; + if (is_class) { + assert(word_size >= (sizeof(Klass)/BytesPerWord), "weird size for klass: %zu", word_size); + result = class_space_arena()->allocate(word_size, wastage); } else { - return non_class_space_arena()->allocate(word_size); + result = non_class_space_arena()->allocate(word_size, wastage); } + if (wastage.is_nonempty()) { + non_class_space_arena()->deallocate(wastage); + } +#ifdef ASSERT + if (result.is_nonempty()) { + const bool in_class_arena = class_space_arena() != nullptr ? 
class_space_arena()->contains(result) : false; + const bool in_nonclass_arena = non_class_space_arena()->contains(result); + assert((is_class && in_class_arena) || (!is_class && in_class_arena != in_nonclass_arena), + "block from neither arena " METABLOCKFORMAT "?", METABLOCKFORMATARGS(result)); + } +#endif + return result.base(); } // Attempt to expand the GC threshold to be good for at least another word_size words @@ -132,12 +159,15 @@ MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace: // because it is not needed anymore. void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size) { MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag); - const bool is_class = Metaspace::using_class_space() && Metaspace::is_in_class_space(ptr); - if (is_class) { - class_space_arena()->deallocate(ptr, word_size); - } else { - non_class_space_arena()->deallocate(ptr, word_size); + NOT_LP64(word_size = align_down(word_size, Metaspace::min_allocation_word_size);) + MetaBlock bl(ptr, word_size); + // Add to class arena only if block is usable for encodable Klass storage. + MetaspaceArena* receiving_arena = non_class_space_arena(); + if (Metaspace::using_class_space() && Metaspace::is_in_class_space(ptr) && + is_aligned(ptr, class_space_arena()->allocation_alignment_bytes())) { + receiving_arena = class_space_arena(); } + receiving_arena->deallocate(bl); DEBUG_ONLY(InternalStats::inc_num_deallocs();) } @@ -180,7 +210,7 @@ void ClassLoaderMetaspace::usage_numbers(size_t* p_used_words, size_t* p_committ { MutexLocker fcl(lock(), Mutex::_no_safepoint_check_flag); usage_numbers(Metaspace::MetadataType::NonClassType, &used_nc, &comm_nc, &cap_nc); - if (Metaspace::using_class_space()) { + if (have_class_space_arena()) { usage_numbers(Metaspace::MetadataType::ClassType, &used_c, &comm_c, &cap_c); } } diff --git a/src/hotspot/share/memory/classLoaderMetaspace.hpp b/src/hotspot/share/memory/classLoaderMetaspace.hpp index 176b8c5082a..aa43e171708 100644 --- a/src/hotspot/share/memory/classLoaderMetaspace.hpp +++ b/src/hotspot/share/memory/classLoaderMetaspace.hpp @@ -33,7 +33,9 @@ class outputStream; namespace metaspace { struct ClmsStats; + class ClmsTester; class MetaspaceArena; + class MetaspaceContext; } // A ClassLoaderMetaspace manages MetaspaceArena(s) for a CLD. @@ -57,6 +59,7 @@ namespace metaspace { // alloc top // class ClassLoaderMetaspace : public CHeapObj { + friend class metaspace::ClmsTester; // for gtests // A reference to an outside lock, held by the CLD. 
Mutex* const _lock; @@ -75,8 +78,14 @@ class ClassLoaderMetaspace : public CHeapObj { metaspace::MetaspaceArena* non_class_space_arena() const { return _non_class_space_arena; } metaspace::MetaspaceArena* class_space_arena() const { return _class_space_arena; } -public: + bool have_class_space_arena() const { return _class_space_arena != nullptr; } + ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type, + metaspace::MetaspaceContext* non_class_context, + metaspace::MetaspaceContext* class_context, + size_t klass_alignment_words); + +public: ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType space_type); ~ClassLoaderMetaspace(); diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index f86be4774d5..aa592a3a6aa 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -651,23 +651,41 @@ void Metaspace::ergo_initialize() { MaxMetaspaceSize = MAX2(MaxMetaspaceSize, commit_alignment()); if (UseCompressedClassPointers) { - // Let CCS size not be larger than 80% of MaxMetaspaceSize. Note that is + // Let Class Space not be larger than 80% of MaxMetaspaceSize. Note that is // grossly over-dimensioned for most usage scenarios; typical ratio of // class space : non class space usage is about 1:6. With many small classes, // it can get as low as 1:2. It is not a big deal though since ccs is only // reserved and will be committed on demand only. - size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10); - size_t adjusted_ccs_size = MIN2(CompressedClassSpaceSize, max_ccs_size); + const size_t max_ccs_size = 8 * (MaxMetaspaceSize / 10); + + // Sanity check. + const size_t max_klass_range = CompressedKlassPointers::max_klass_range_size(); + assert(max_klass_range >= reserve_alignment(), + "Klass range (%zu) must cover at least a full root chunk (%zu)", + max_klass_range, reserve_alignment()); + + size_t adjusted_ccs_size = MIN3(CompressedClassSpaceSize, max_ccs_size, max_klass_range); // CCS must be aligned to root chunk size, and be at least the size of one // root chunk. adjusted_ccs_size = align_up(adjusted_ccs_size, reserve_alignment()); adjusted_ccs_size = MAX2(adjusted_ccs_size, reserve_alignment()); + // Print a warning if the adjusted size differs from the users input + if (CompressedClassSpaceSize != adjusted_ccs_size) { + #define X "CompressedClassSpaceSize adjusted from user input " \ + "%zu bytes to %zu bytes", CompressedClassSpaceSize, adjusted_ccs_size + if (FLAG_IS_CMDLINE(CompressedClassSpaceSize)) { + log_warning(metaspace)(X); + } else { + log_info(metaspace)(X); + } + #undef X + } + // Note: re-adjusting may have us left with a CompressedClassSpaceSize // larger than MaxMetaspaceSize for very small values of MaxMetaspaceSize. // Lets just live with that, its not a big deal. - if (adjusted_ccs_size != CompressedClassSpaceSize) { FLAG_SET_ERGO(CompressedClassSpaceSize, adjusted_ccs_size); log_info(metaspace)("Setting CompressedClassSpaceSize to " SIZE_FORMAT ".", @@ -778,6 +796,7 @@ void Metaspace::global_initialize() { Metaspace::initialize_class_space(rs); // Set up compressed class pointer encoding. + // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination. 
CompressedKlassPointers::initialize((address)rs.base(), rs.size()); } @@ -846,9 +865,17 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); if (result != nullptr) { +#ifdef ASSERT + if (using_class_space() && mdtype == ClassType) { + assert(is_in_class_space(result) && + is_aligned(result, CompressedKlassPointers::klass_alignment_in_bytes()), "Sanity"); + } else { + assert((is_in_class_space(result) || is_in_nonclass_metaspace(result)) && + is_aligned(result, Metaspace::min_allocation_alignment_bytes), "Sanity"); + } +#endif // Zero initialize. Copy::fill_to_words((HeapWord*)result, word_size, 0); - log_trace(metaspace)("Metaspace::allocate: type %d return " PTR_FORMAT ".", (int)type, p2i(result)); } diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp index 7076a6a09bb..7adebfd826b 100644 --- a/src/hotspot/share/memory/metaspace.hpp +++ b/src/hotspot/share/memory/metaspace.hpp @@ -26,6 +26,7 @@ #define SHARE_MEMORY_METASPACE_HPP #include "memory/allocation.hpp" +#include "memory/virtualspace.hpp" #include "runtime/globals.hpp" #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" @@ -35,7 +36,6 @@ class MetaspaceShared; class MetaspaceTracer; class Mutex; class outputStream; -class ReservedSpace; ////////////////// Metaspace /////////////////////// @@ -108,6 +108,17 @@ public: // The largest possible single allocation static size_t max_allocation_word_size(); + // Minimum allocation alignment, in bytes. All MetaData shall be aligned correctly + // to be able to hold 64-bit data types. Unlike malloc, we don't care for larger + // data types. + static constexpr size_t min_allocation_alignment_bytes = sizeof(uint64_t); + + // Minimum allocation alignment, in words, Metaspace observes. + static constexpr size_t min_allocation_alignment_words = min_allocation_alignment_bytes / BytesPerWord; + + // Every allocation will get rounded up to the minimum word size. + static constexpr size_t min_allocation_word_size = min_allocation_alignment_words; + static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, bool use_class_space, TRAPS); diff --git a/src/hotspot/share/memory/metaspace/binList.hpp b/src/hotspot/share/memory/metaspace/binList.hpp index 76cd9e155aa..9442ea3cd52 100644 --- a/src/hotspot/share/memory/metaspace/binList.hpp +++ b/src/hotspot/share/memory/metaspace/binList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * Copyright (c) 2023 Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +28,7 @@ #define SHARE_MEMORY_METASPACE_BINLIST_HPP #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" #include "memory/metaspace/metaspaceCommon.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -36,8 +37,7 @@ namespace metaspace { // BinList is a data structure to manage small to very small memory blocks -// (only a few words). It is used to manage deallocated blocks - see -// class FreeBlocks. +// (only a few words). It is used to manage deallocated small blocks. 
// Memory blocks are kept in a vector of linked lists of equi-sized blocks: // @@ -143,7 +143,10 @@ public: } } - void add_block(MetaWord* p, size_t word_size) { + void add_block(MetaBlock mb) { + assert(!mb.is_empty(), "Don't add empty blocks"); + const size_t word_size = mb.word_size(); + MetaWord* const p = mb.base(); assert(word_size >= MinWordSize && word_size <= MaxWordSize, "bad block size"); DEBUG_ONLY(write_canary(p, word_size);) @@ -155,10 +158,11 @@ public: } // Given a word_size, searches and returns a block of at least that size. - // Block may be larger. Real block size is returned in *p_real_word_size. - MetaWord* remove_block(size_t word_size, size_t* p_real_word_size) { + // Block may be larger. + MetaBlock remove_block(size_t word_size) { assert(word_size >= MinWordSize && word_size <= MaxWordSize, "bad block size " SIZE_FORMAT ".", word_size); + MetaBlock result; int index = index_for_word_size(word_size); index = index_for_next_non_empty_list(index); if (index != -1) { @@ -169,12 +173,9 @@ public: "bad block in list[%d] (" BLOCK_FORMAT ")", index, BLOCK_FORMAT_ARGS(b, real_word_size)); _blocks[index] = b->_next; _counter.sub(real_word_size); - *p_real_word_size = real_word_size; - return (MetaWord*)b; - } else { - *p_real_word_size = 0; - return nullptr; + result = MetaBlock((MetaWord*)b, real_word_size); } + return result; } // Returns number of blocks in this structure @@ -191,9 +192,12 @@ public: for (int i = 0; i < num_lists; i++) { const size_t s = word_size_for_index(i); int pos = 0; + Block* b_last = nullptr; // catch simple circularities for (Block* b = _blocks[i]; b != nullptr; b = b->_next, pos++) { assert(check_canary(b, s), ""); + assert(b != b_last, "Circle"); local_counter.add(s); + b_last = b; } } local_counter.check(_counter); diff --git a/src/hotspot/share/memory/metaspace/blockTree.cpp b/src/hotspot/share/memory/metaspace/blockTree.cpp index 1f1e54f4a46..85e77508836 100644 --- a/src/hotspot/share/memory/metaspace/blockTree.cpp +++ b/src/hotspot/share/memory/metaspace/blockTree.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -180,8 +180,8 @@ void BlockTree::verify() const { _counter.check(counter); } -void BlockTree::zap_range(MetaWord* p, size_t word_size) { - memset(p, 0xF3, word_size * sizeof(MetaWord)); +void BlockTree::zap_block(MetaBlock bl) { + memset(bl.base(), 0xF3, bl.word_size() * sizeof(MetaWord)); } void BlockTree::print_tree(outputStream* st) const { @@ -224,6 +224,12 @@ void BlockTree::print_tree(outputStream* st) const { } } + // Handle simple circularities + if (n == n->_right || n == n->_left || n == n->_next) { + st->print_cr("@" PTR_FORMAT ": circularity detected.", p2i(n)); + return; // stop printing + } + // Handle children. if (n->_right != nullptr) { walkinfo info2; diff --git a/src/hotspot/share/memory/metaspace/blockTree.hpp b/src/hotspot/share/memory/metaspace/blockTree.hpp index c5dd48926ac..8bcdd30919a 100644 --- a/src/hotspot/share/memory/metaspace/blockTree.hpp +++ b/src/hotspot/share/memory/metaspace/blockTree.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,13 +29,14 @@ #include "memory/allocation.hpp" #include "memory/metaspace/chunklevel.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" namespace metaspace { // BlockTree is a rather simple binary search tree. It is used to -// manage small to medium free memory blocks (see class FreeBlocks). +// manage medium to large free memory blocks. // // There is no separation between payload (managed blocks) and nodes: the // memory blocks themselves are the nodes, with the block size being the key. @@ -80,8 +81,7 @@ class BlockTree: public CHeapObj { NOT_LP64(0x4e4f4445) LP64_ONLY(0x4e4f44454e4f4445ULL); // "NODE" resp "NODENODE" // Note: we afford us the luxury of an always-there canary value. - // The space for that is there (these nodes are only used to manage larger blocks, - // see FreeBlocks::MaxSmallBlocksWordSize). + // The space for that is there (these nodes are only used to manage larger blocks). // It is initialized in debug and release, but only automatically tested // in debug. const intptr_t _canary; @@ -335,7 +335,7 @@ private: } #ifdef ASSERT - void zap_range(MetaWord* p, size_t word_size); + void zap_block(MetaBlock block); // Helper for verify() void verify_node_pointer(const Node* n) const; #endif // ASSERT @@ -345,10 +345,11 @@ public: BlockTree() : _root(nullptr) {} // Add a memory block to the tree. Its content will be overwritten. - void add_block(MetaWord* p, size_t word_size) { - DEBUG_ONLY(zap_range(p, word_size)); + void add_block(MetaBlock block) { + DEBUG_ONLY(zap_block(block);) + const size_t word_size = block.word_size(); assert(word_size >= MinWordSize, "invalid block size " SIZE_FORMAT, word_size); - Node* n = new(p) Node(word_size); + Node* n = new(block.base()) Node(word_size); if (_root == nullptr) { _root = n; } else { @@ -358,11 +359,11 @@ public: } // Given a word_size, search and return the smallest block that is equal or - // larger than that size. Upon return, *p_real_word_size contains the actual - // block size. - MetaWord* remove_block(size_t word_size, size_t* p_real_word_size) { + // larger than that size. + MetaBlock remove_block(size_t word_size) { assert(word_size >= MinWordSize, "invalid block size " SIZE_FORMAT, word_size); + MetaBlock result; Node* n = find_closest_fit(word_size); if (n != nullptr) { @@ -379,15 +380,13 @@ public: remove_node_from_tree(n); } - MetaWord* p = (MetaWord*)n; - *p_real_word_size = n->_word_size; + result = MetaBlock((MetaWord*)n, n->_word_size); _counter.sub(n->_word_size); - DEBUG_ONLY(zap_range(p, n->_word_size)); - return p; + DEBUG_ONLY(zap_block(result);) } - return nullptr; + return result; } // Returns number of blocks in this structure diff --git a/src/hotspot/share/memory/metaspace/freeBlocks.cpp b/src/hotspot/share/memory/metaspace/freeBlocks.cpp index b6a281f0db7..ab65387043a 100644 --- a/src/hotspot/share/memory/metaspace/freeBlocks.cpp +++ b/src/hotspot/share/memory/metaspace/freeBlocks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -30,32 +30,23 @@ namespace metaspace { -void FreeBlocks::add_block(MetaWord* p, size_t word_size) { - if (word_size > MaxSmallBlocksWordSize) { - _tree.add_block(p, word_size); +void FreeBlocks::add_block(MetaBlock bl) { + if (bl.word_size() > _small_blocks.MaxWordSize) { + _tree.add_block(bl); } else { - _small_blocks.add_block(p, word_size); + _small_blocks.add_block(bl); } } -MetaWord* FreeBlocks::remove_block(size_t requested_word_size) { +MetaBlock FreeBlocks::remove_block(size_t requested_word_size) { size_t real_size = 0; - MetaWord* p = nullptr; - if (requested_word_size > MaxSmallBlocksWordSize) { - p = _tree.remove_block(requested_word_size, &real_size); + MetaBlock bl; + if (requested_word_size > _small_blocks.MaxWordSize) { + bl = _tree.remove_block(requested_word_size); } else { - p = _small_blocks.remove_block(requested_word_size, &real_size); + bl = _small_blocks.remove_block(requested_word_size); } - if (p != nullptr) { - // Blocks which are larger than a certain threshold are split and - // the remainder is handed back to the manager. - const size_t waste = real_size - requested_word_size; - if (waste >= MinWordSize) { - add_block(p + requested_word_size, waste); - } - } - return p; + return bl; } } // namespace metaspace - diff --git a/src/hotspot/share/memory/metaspace/freeBlocks.hpp b/src/hotspot/share/memory/metaspace/freeBlocks.hpp index d5a3c29a9d7..77ab48e17ea 100644 --- a/src/hotspot/share/memory/metaspace/freeBlocks.hpp +++ b/src/hotspot/share/memory/metaspace/freeBlocks.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -73,20 +73,16 @@ class FreeBlocks : public CHeapObj { // kept in the blocktree. STATIC_ASSERT(BinList32::MaxWordSize >= BlockTree::MinWordSize); - // Cutoff point: blocks larger than this size are kept in the - // tree, blocks smaller than or equal to this size in the bin list. - const size_t MaxSmallBlocksWordSize = BinList32::MaxWordSize; - public: // Smallest blocks we can keep in this structure. const static size_t MinWordSize = BinList32::MinWordSize; // Add a block to the deallocation management. - void add_block(MetaWord* p, size_t word_size); + void add_block(MetaBlock bl); - // Retrieve a block of at least requested_word_size. - MetaWord* remove_block(size_t requested_word_size); + // Retrieve a block of at least requested_word_size. May be larger. + MetaBlock remove_block(size_t requested_word_size); #ifdef ASSERT void verify() const { diff --git a/src/hotspot/share/memory/metaspace/metablock.hpp b/src/hotspot/share/memory/metaspace/metablock.hpp new file mode 100644 index 00000000000..96e27ff8702 --- /dev/null +++ b/src/hotspot/share/memory/metaspace/metablock.hpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2023 Red Hat, Inc. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_MEMORY_METASPACE_METABLOCK_HPP +#define SHARE_MEMORY_METASPACE_METABLOCK_HPP + +#include "utilities/globalDefinitions.hpp" + +class outputStream; + +namespace metaspace { + +// Tiny structure to be passed by value +class MetaBlock { + + MetaWord* _base; + size_t _word_size; + +public: + + MetaBlock(MetaWord* p, size_t word_size) : + _base(word_size == 0 ? nullptr : p), _word_size(word_size) {} + MetaBlock() : MetaBlock(nullptr, 0) {} + + MetaWord* base() const { return _base; } + const MetaWord* end() const { return _base + _word_size; } + size_t word_size() const { return _word_size; } + bool is_empty() const { return _base == nullptr; } + bool is_nonempty() const { return _base != nullptr; } + void reset() { _base = nullptr; _word_size = 0; } + + bool operator==(const MetaBlock& rhs) const { + return base() == rhs.base() && + word_size() == rhs.word_size(); + } + + // Split off tail block. + inline MetaBlock split_off_tail(size_t tailsize); + + DEBUG_ONLY(inline void verify() const;) + + // Convenience functions + inline bool is_aligned_base(size_t alignment_words) const; + inline bool is_aligned_size(size_t alignment_words) const; + + void print_on(outputStream* st) const; +}; + +#define METABLOCKFORMAT "block (@" PTR_FORMAT " word size " SIZE_FORMAT ")" +#define METABLOCKFORMATARGS(__block__) p2i((__block__).base()), (__block__).word_size() + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_METABLOCK_HPP diff --git a/src/hotspot/share/memory/metaspace/metablock.inline.hpp b/src/hotspot/share/memory/metaspace/metablock.inline.hpp new file mode 100644 index 00000000000..04eb6c22277 --- /dev/null +++ b/src/hotspot/share/memory/metaspace/metablock.inline.hpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2023 Red Hat, Inc. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_MEMORY_METASPACE_METABLOCK_INLINE_HPP +#define SHARE_MEMORY_METASPACE_METABLOCK_INLINE_HPP + +#include "memory/metaspace/metablock.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/align.hpp" +#include "utilities/ostream.hpp" +#include "utilities/debug.hpp" + +class outputStream; + +namespace metaspace { + +inline MetaBlock MetaBlock::split_off_tail(size_t tailsize) { + if (is_empty() || tailsize == 0) { + return MetaBlock(); + } + assert(tailsize <= _word_size, "invalid split point for block " + METABLOCKFORMAT ": %zu", METABLOCKFORMATARGS(*this), tailsize); + const size_t new_size = _word_size - tailsize; + MetaBlock tail(_base + new_size, tailsize); + _word_size = new_size; + if (_word_size == 0) { + _base = nullptr; + } + return tail; +} + +inline void MetaBlock::print_on(outputStream* st) const { + st->print(METABLOCKFORMAT, METABLOCKFORMATARGS(*this)); +} + +// Convenience functions +inline bool MetaBlock::is_aligned_base(size_t alignment_words) const { + return is_aligned(_base, alignment_words * BytesPerWord); +} + +inline bool MetaBlock::is_aligned_size(size_t alignment_words) const { + return is_aligned(_word_size, alignment_words); +} + +// some convenience asserts +#define assert_block_base_aligned(block, alignment_words) \ + assert(block.is_aligned_base(alignment_words), "Block wrong base alignment " METABLOCKFORMAT, METABLOCKFORMATARGS(block)); + +#define assert_block_size_aligned(block, alignment_words) \ + assert(block.is_aligned_size(alignment_words), "Block wrong size alignment " METABLOCKFORMAT, METABLOCKFORMATARGS(block)); + +#define assert_block_larger_or_equal(block, x) \ + assert(block.word_size() >= x, "Block too small " METABLOCKFORMAT, METABLOCKFORMATARGS(block)); + +#ifdef ASSERT +inline void MetaBlock::verify() const { + assert( (_base == nullptr && _word_size == 0) || + (_base != nullptr && _word_size > 0), + "block invalid " METABLOCKFORMAT, METABLOCKFORMATARGS(*this)); +} +#endif + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_METABLOCK_INLINE_HPP diff --git a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp index 92d7d0ea7eb..33f1bfc46a3 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp @@ -30,10 +30,12 @@ #include "memory/metaspace/counters.hpp" #include "memory/metaspace/freeBlocks.hpp" #include "memory/metaspace/internalStats.hpp" +#include "memory/metaspace/metablock.inline.hpp" #include "memory/metaspace/metachunk.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" #include "memory/metaspace/metaspaceCommon.hpp" +#include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" #include "memory/metaspace/virtualSpaceList.hpp" @@ -56,24 +58,25 @@ chunklevel_t MetaspaceArena::next_chunk_level() const { return _growth_policy->get_level_at_step(growth_step); } -// Given a chunk, add its remaining free committed space to the free block list. -void MetaspaceArena::salvage_chunk(Metachunk* c) { - size_t remaining_words = c->free_below_committed_words(); +// Given a chunk, return the committed remainder of this chunk. 
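The MetaBlock type introduced above is a plain value (base pointer plus word size) that the arena code now passes around instead of raw pointer/size pairs; split_off_tail() trims the block in place and hands back the tail, collapsing to the empty block once nothing is left. A minimal sketch of those semantics, assuming the new headers are on the include path (the function name and the stack buffer are illustrative only, not part of the patch):

#include "memory/metaspace/metablock.inline.hpp"
#include "utilities/debug.hpp"

using metaspace::MetaBlock;

static void metablock_split_sketch() {
  MetaWord buffer[16];                       // stand-in for committed metaspace memory
  MetaBlock block(buffer, 16);               // 16-word block
  MetaBlock tail = block.split_off_tail(6);  // keep 10 words, split off 6
  assert(block.word_size() == 10 && block.base() == buffer, "head keeps the front");
  assert(tail.word_size() == 6 && tail.base() == buffer + 10, "tail starts where the head ends");
  // Splitting off the whole remainder leaves an empty (null-based) block behind.
  MetaBlock rest = block.split_off_tail(block.word_size());
  assert(block.is_empty() && rest.word_size() == 10, "fully split");
}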
+MetaBlock MetaspaceArena::salvage_chunk(Metachunk* c) { + MetaBlock result; + const size_t remaining_words = c->free_below_committed_words(); if (remaining_words >= FreeBlocks::MinWordSize) { UL2(trace, "salvaging chunk " METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c)); MetaWord* ptr = c->allocate(remaining_words); assert(ptr != nullptr, "Should have worked"); - _total_used_words_counter->increment_by(remaining_words); - add_allocation_to_fbl(ptr, remaining_words); + result = MetaBlock(ptr, remaining_words); // After this operation: the chunk should have no free committed space left. assert(c->free_below_committed_words() == 0, "Salvaging chunk failed (chunk " METACHUNK_FULL_FORMAT ").", METACHUNK_FULL_FORMAT_ARGS(c)); } + return result; } // Allocate a new chunk from the underlying chunk manager able to hold at least @@ -97,28 +100,35 @@ Metachunk* MetaspaceArena::allocate_new_chunk(size_t requested_word_size) { return c; } -void MetaspaceArena::add_allocation_to_fbl(MetaWord* p, size_t word_size) { - assert(p != nullptr, "p is null"); - assert_is_aligned_metaspace_pointer(p); - assert(word_size > 0, "zero sized"); - +void MetaspaceArena::add_allocation_to_fbl(MetaBlock bl) { + assert(bl.is_nonempty(), "Sanity"); + assert_block_base_aligned(bl, allocation_alignment_words()); + assert_block_size_aligned(bl, Metaspace::min_allocation_alignment_words); if (_fbl == nullptr) { _fbl = new FreeBlocks(); // Create only on demand } - _fbl->add_block(p, word_size); + _fbl->add_block(bl); } -MetaspaceArena::MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, - SizeAtomicCounter* total_used_words_counter, - const char* name) : - _chunk_manager(chunk_manager), +MetaspaceArena::MetaspaceArena(MetaspaceContext* context, + const ArenaGrowthPolicy* growth_policy, + size_t allocation_alignment_words, + const char* name) : + _allocation_alignment_words(allocation_alignment_words), + _chunk_manager(context->cm()), _growth_policy(growth_policy), _chunks(), _fbl(nullptr), - _total_used_words_counter(total_used_words_counter), + _total_used_words_counter(context->used_words_counter()), _name(name) { - UL(debug, ": born."); + // Check arena allocation alignment + assert(is_power_of_2(_allocation_alignment_words) && + _allocation_alignment_words >= Metaspace::min_allocation_alignment_words && + _allocation_alignment_words <= chunklevel::MIN_CHUNK_WORD_SIZE, + "Invalid alignment: %zu", _allocation_alignment_words); + + UL(debug, "born."); // Update statistics InternalStats::inc_num_arena_births(); @@ -140,7 +150,7 @@ MetaspaceArena::~MetaspaceArena() { c = c2; } - UL2(info, "returned %d chunks, total capacity " SIZE_FORMAT " words.", + UL2(debug, "returned %d chunks, total capacity " SIZE_FORMAT " words.", return_counter.count(), return_counter.total_size()); _total_used_words_counter->decrement_by(return_counter.total_size()); @@ -205,49 +215,93 @@ bool MetaspaceArena::attempt_enlarge_current_chunk(size_t requested_word_size) { // 3) Attempt to enlarge the current chunk in place if it is too small. // 4) Attempt to get a new chunk and allocate from that chunk. // At any point, if we hit a commit limit, we return null. 
-MetaWord* MetaspaceArena::allocate(size_t requested_word_size) { +MetaBlock MetaspaceArena::allocate(size_t requested_word_size, MetaBlock& wastage) { UL2(trace, "requested " SIZE_FORMAT " words.", requested_word_size); - MetaWord* p = nullptr; const size_t aligned_word_size = get_raw_word_size_for_requested_word_size(requested_word_size); + MetaBlock result; + bool taken_from_fbl = false; + // Before bothering the arena proper, attempt to re-use a block from the free blocks list if (_fbl != nullptr && !_fbl->is_empty()) { - p = _fbl->remove_block(aligned_word_size); - if (p != nullptr) { + result = _fbl->remove_block(aligned_word_size); + if (result.is_nonempty()) { + assert_block_larger_or_equal(result, aligned_word_size); + assert_block_base_aligned(result, allocation_alignment_words()); + assert_block_size_aligned(result, Metaspace::min_allocation_alignment_words); + // Split off wastage + wastage = result.split_off_tail(result.word_size() - aligned_word_size); + // Stats, logging DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();) - UL2(trace, "returning " PTR_FORMAT " - taken from fbl (now: %d, " SIZE_FORMAT ").", - p2i(p), _fbl->count(), _fbl->total_size()); - assert_is_aligned_metaspace_pointer(p); + UL2(trace, "returning " METABLOCKFORMAT " with wastage " METABLOCKFORMAT " - taken from fbl (now: %d, " SIZE_FORMAT ").", + METABLOCKFORMATARGS(result), METABLOCKFORMATARGS(wastage), _fbl->count(), _fbl->total_size()); // Note: free blocks in freeblock dictionary still count as "used" as far as statistics go; - // therefore we have no need to adjust any usage counters (see epilogue of allocate_inner()) - // and can just return here. - return p; + // therefore we don't need to adjust any usage counters (see epilogue of allocate_inner()). + taken_from_fbl = true; } } - // Primary allocation - p = allocate_inner(aligned_word_size); + if (result.is_empty()) { + // Free-block allocation failed; we allocate from the arena. + result = allocate_inner(aligned_word_size, wastage); + } - return p; + // Logging + if (result.is_nonempty()) { + LogTarget(Trace, metaspace) lt; + if (lt.is_enabled()) { + LogStream ls(lt); + ls.print(LOGFMT ": returning " METABLOCKFORMAT " taken from %s, ", LOGFMT_ARGS, + METABLOCKFORMATARGS(result), (taken_from_fbl ? "fbl" : "arena")); + if (wastage.is_empty()) { + ls.print("no wastage"); + } else { + ls.print("wastage " METABLOCKFORMAT, METABLOCKFORMATARGS(wastage)); + } + } + } else { + UL(info, "allocation failed, returned null."); + } + + // Final sanity checks +#ifdef ASSERT + result.verify(); + wastage.verify(); + if (result.is_nonempty()) { + assert(result.word_size() == aligned_word_size && + is_aligned(result.base(), _allocation_alignment_words * BytesPerWord), + "result bad or unaligned: " METABLOCKFORMAT ".", METABLOCKFORMATARGS(result)); + } + if (wastage.is_nonempty()) { + assert(wastage.is_empty() || + (wastage.is_aligned_base(Metaspace::min_allocation_alignment_words) && + wastage.is_aligned_size(Metaspace::min_allocation_alignment_words)), + "Misaligned wastage: " METABLOCKFORMAT".", METABLOCKFORMATARGS(wastage)); + } +#endif // ASSERT + return result; } // Allocate from the arena proper, once dictionary allocations and fencing are sorted out. 
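With the new signature, MetaspaceArena::allocate() reports alignment and salvage remainders through the wastage out-parameter instead of keeping them internally; ClassLoaderMetaspace::allocate() and MetaspaceTestArena::allocate() in this patch both hand that block straight back to an arena. A caller-side sketch of the same pattern (allocate_from is an illustrative helper, not part of the patch):

#include "memory/metaspace/metablock.hpp"
#include "memory/metaspace/metaspaceArena.hpp"

static MetaWord* allocate_from(metaspace::MetaspaceArena* arena, size_t word_size) {
  metaspace::MetaBlock wastage;
  metaspace::MetaBlock result = arena->allocate(word_size, wastage);
  if (wastage.is_nonempty()) {
    // Alignment gaps and salvaged chunk remainders come back as wastage;
    // return them to the arena's free-block list so they can be reused.
    arena->deallocate(wastage);
  }
  return result.is_nonempty() ? result.base() : nullptr;
}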
-MetaWord* MetaspaceArena::allocate_inner(size_t word_size) { - assert_is_aligned(word_size, metaspace::AllocationAlignmentWordSize); +MetaBlock MetaspaceArena::allocate_inner(size_t word_size, MetaBlock& wastage) { - MetaWord* p = nullptr; + MetaBlock result; bool current_chunk_too_small = false; bool commit_failure = false; + size_t alignment_gap_size = 0; if (current_chunk() != nullptr) { - // Attempt to satisfy the allocation from the current chunk. + const MetaWord* const chunk_top = current_chunk()->top(); + alignment_gap_size = align_up(chunk_top, _allocation_alignment_words * BytesPerWord) - chunk_top; + const size_t word_size_plus_alignment = word_size + alignment_gap_size; + // If the current chunk is too small to hold the requested size, attempt to enlarge it. // If that fails, retire the chunk. - if (current_chunk()->free_words() < word_size) { - if (!attempt_enlarge_current_chunk(word_size)) { + if (current_chunk()->free_words() < word_size_plus_alignment) { + if (!attempt_enlarge_current_chunk(word_size_plus_alignment)) { current_chunk_too_small = true; } else { DEBUG_ONLY(InternalStats::inc_num_chunks_enlarged();) @@ -259,20 +313,26 @@ MetaWord* MetaspaceArena::allocate_inner(size_t word_size) { // hit a limit (either GC threshold or MaxMetaspaceSize). In that case retire the // chunk. if (!current_chunk_too_small) { - if (!current_chunk()->ensure_committed_additional(word_size)) { - UL2(info, "commit failure (requested size: " SIZE_FORMAT ")", word_size); + if (!current_chunk()->ensure_committed_additional(word_size_plus_alignment)) { + UL2(info, "commit failure (requested size: " SIZE_FORMAT ")", word_size_plus_alignment); commit_failure = true; } } // Allocate from the current chunk. This should work now. if (!current_chunk_too_small && !commit_failure) { - p = current_chunk()->allocate(word_size); - assert(p != nullptr, "Allocation from chunk failed."); + MetaWord* const p_gap = current_chunk()->allocate(word_size_plus_alignment); + assert(p_gap != nullptr, "Allocation from chunk failed."); + MetaWord* const p_user_allocation = p_gap + alignment_gap_size; + result = MetaBlock(p_user_allocation, word_size); + if (alignment_gap_size > 0) { + NOT_LP64(assert(alignment_gap_size >= AllocationAlignmentWordSize, "Sanity")); + wastage = MetaBlock(p_gap, alignment_gap_size); + } } } - if (p == nullptr) { + if (result.is_empty()) { // If we are here, we either had no current chunk to begin with or it was deemed insufficient. assert(current_chunk() == nullptr || current_chunk_too_small || commit_failure, "Sanity"); @@ -286,63 +346,69 @@ MetaWord* MetaspaceArena::allocate_inner(size_t word_size) { // We have a new chunk. Before making it the current chunk, retire the old one. if (current_chunk() != nullptr) { - salvage_chunk(current_chunk()); + wastage = salvage_chunk(current_chunk()); DEBUG_ONLY(InternalStats::inc_num_chunks_retired();) } _chunks.add(new_chunk); - // Now, allocate from that chunk. That should work. - p = current_chunk()->allocate(word_size); + // Now, allocate from that chunk. That should work. Note that the resulting allocation + // is guaranteed to be aligned to arena alignment, since arena alignment cannot be larger + // than smallest chunk size, and chunk starts are aligned by their size (buddy allocation). 
+ MetaWord* const p = current_chunk()->allocate(word_size); assert(p != nullptr, "Allocation from chunk failed."); + result = MetaBlock(p, word_size); } else { UL2(info, "failed to allocate new chunk for requested word size " SIZE_FORMAT ".", word_size); } } - if (p == nullptr) { + if (result.is_empty()) { InternalStats::inc_num_allocs_failed_limit(); } else { DEBUG_ONLY(InternalStats::inc_num_allocs();) - _total_used_words_counter->increment_by(word_size); + _total_used_words_counter->increment_by(word_size + wastage.word_size()); } SOMETIMES(verify();) - if (p == nullptr) { - UL(info, "allocation failed, returned null."); - } else { + if (result.is_nonempty()) { UL2(trace, "after allocation: %u chunk(s), current:" METACHUNK_FULL_FORMAT, _chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk())); - UL2(trace, "returning " PTR_FORMAT ".", p2i(p)); } - assert_is_aligned_metaspace_pointer(p); +#ifdef ASSERT + if (wastage.is_nonempty()) { + // Wastage from arena allocations only occurs if either or both are true: + // - it is too small to hold the requested allocation words + // - it is misaligned + assert(!wastage.is_aligned_base(allocation_alignment_words()) || + wastage.word_size() < word_size, + "Unexpected wastage: " METABLOCKFORMAT ", arena alignment: %zu, allocation word size: %zu", + METABLOCKFORMATARGS(wastage), allocation_alignment_words(), word_size); + wastage.verify(); + } +#endif // ASSERT - return p; + return result; } // Prematurely returns a metaspace allocation to the _block_freelists // because it is not needed anymore (requires CLD lock to be active). -void MetaspaceArena::deallocate(MetaWord* p, size_t word_size) { - // At this point a current chunk must exist since we only deallocate if we did allocate before. - assert(current_chunk() != nullptr, "stray deallocation?"); - assert(is_valid_area(p, word_size), - "Pointer range not part of this Arena and cannot be deallocated: (" PTR_FORMAT ".." PTR_FORMAT ").", - p2i(p), p2i(p + word_size)); - - UL2(trace, "deallocating " PTR_FORMAT ", word size: " SIZE_FORMAT ".", - p2i(p), word_size); - - // Only blocks that had been allocated via MetaspaceArena::allocate(size) must be handed in - // to MetaspaceArena::deallocate(), and only with the same size that had been original used for allocation. - // Therefore the pointer must be aligned correctly, and size can be alignment-adjusted (the latter - // only matters on 32-bit): - assert_is_aligned_metaspace_pointer(p); - size_t raw_word_size = get_raw_word_size_for_requested_word_size(word_size); - - add_allocation_to_fbl(p, raw_word_size); - +void MetaspaceArena::deallocate(MetaBlock block) { + // Note that we may receive blocks that don't originate from this + // arena, and that is okay. + DEBUG_ONLY(block.verify();) + // This only matters on 32-bit: + // Since we always align up allocations from arena, we align up here, too. +#ifndef _LP64 + MetaBlock raw_block(block.base(), get_raw_word_size_for_requested_word_size(block.word_size())); + add_allocation_to_fbl(raw_block); +#else + add_allocation_to_fbl(block); +#endif + UL2(trace, "added to fbl: " METABLOCKFORMAT ", (now: %d, " SIZE_FORMAT ").", + METABLOCKFORMATARGS(block), _fbl->count(), _fbl->total_size()); SOMETIMES(verify();) } @@ -400,15 +466,15 @@ void MetaspaceArena::verify() const { } } -// Returns true if the area indicated by pointer and size have actually been allocated -// from this arena. 
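The alignment gap computed in allocate_inner() is simply the distance from the current chunk top to the next boundary of the arena's allocation alignment; the gap words are carved off as wastage and the caller's block starts at the aligned address. A standalone sketch of that arithmetic, under the assumption of a power-of-two alignment (the helper name and the 8-word alignment in the example are illustrative; the class-space arena takes its alignment from CompressedKlassPointers::klass_alignment_in_words()):

#include <cstddef>
#include <cstdint>

static size_t alignment_gap_words(uintptr_t chunk_top, size_t alignment_words) {
  const size_t bytes_per_word = sizeof(void*);             // 8 on LP64
  const size_t alignment_bytes = alignment_words * bytes_per_word;
  const uintptr_t aligned_top =
      (chunk_top + alignment_bytes - 1) & ~uintptr_t(alignment_bytes - 1);
  return (aligned_top - chunk_top) / bytes_per_word;       // same as align_up(top, ...) - top
}
// Example: with an 8-word (64-byte) alignment and a chunk top 5 words past the
// last aligned boundary, the gap is 3 words; those 3 words become the wastage
// block and the user allocation begins at the aligned address.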
-bool MetaspaceArena::is_valid_area(MetaWord* p, size_t word_size) const { - assert(p != nullptr && word_size > 0, "Sanity"); +// Returns true if the given block is contained in this arena +bool MetaspaceArena::contains(MetaBlock bl) const { + DEBUG_ONLY(bl.verify();) + assert(bl.is_nonempty(), "Sanity"); bool found = false; for (const Metachunk* c = _chunks.first(); c != nullptr && !found; c = c->next()) { - assert(c->is_valid_committed_pointer(p) == - c->is_valid_committed_pointer(p + word_size - 1), "range intersects"); - found = c->is_valid_committed_pointer(p); + assert(c->is_valid_committed_pointer(bl.base()) == + c->is_valid_committed_pointer(bl.end() - 1), "range intersects"); + found = c->is_valid_committed_pointer(bl.base()); } return found; } diff --git a/src/hotspot/share/memory/metaspace/metaspaceArena.hpp b/src/hotspot/share/memory/metaspace/metaspaceArena.hpp index 77eb939c6b4..75d1ec5318c 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceArena.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceArena.hpp @@ -29,6 +29,7 @@ #include "memory/allocation.hpp" #include "memory/metaspace.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" #include "memory/metaspace/metachunkList.hpp" class outputStream; @@ -37,11 +38,12 @@ class Mutex; namespace metaspace { class ArenaGrowthPolicy; -class ChunkManager; -class Metachunk; -class FreeBlocks; - struct ArenaStats; +class ChunkManager; +class FreeBlocks; +class Metachunk; +class MetaspaceContext; + // The MetaspaceArena is a growable metaspace memory pool belonging to a CLD; // internally it consists of a list of metaspace chunks, of which the head chunk @@ -74,10 +76,14 @@ struct ArenaStats; // class MetaspaceArena : public CHeapObj { + friend class MetaspaceArenaTestFriend; // Please note that access to a metaspace arena may be shared // between threads and needs to be synchronized in CLMS. + // Allocation alignment specific to this arena + const size_t _allocation_alignment_words; + // Reference to the chunk manager to allocate chunks from. ChunkManager* const _chunk_manager; @@ -104,10 +110,10 @@ class MetaspaceArena : public CHeapObj { // free block list FreeBlocks* fbl() const { return _fbl; } - void add_allocation_to_fbl(MetaWord* p, size_t word_size); + void add_allocation_to_fbl(MetaBlock bl); - // Given a chunk, add its remaining free committed space to the free block list. - void salvage_chunk(Metachunk* c); + // Given a chunk, return the committed remainder of this chunk. + MetaBlock salvage_chunk(Metachunk* c); // Allocate a new chunk from the underlying chunk manager able to hold at least // requested word size. @@ -122,32 +128,31 @@ class MetaspaceArena : public CHeapObj { // On success, true is returned, false otherwise. bool attempt_enlarge_current_chunk(size_t requested_word_size); - // Returns true if the area indicated by pointer and size have actually been allocated - // from this arena. - DEBUG_ONLY(bool is_valid_area(MetaWord* p, size_t word_size) const;) - // Allocate from the arena proper, once dictionary allocations and fencing are sorted out. 
- MetaWord* allocate_inner(size_t word_size); + MetaBlock allocate_inner(size_t word_size, MetaBlock& wastage); public: - MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, - SizeAtomicCounter* total_used_words_counter, + MetaspaceArena(MetaspaceContext* context, + const ArenaGrowthPolicy* growth_policy, + size_t allocation_alignment_words, const char* name); ~MetaspaceArena(); + size_t allocation_alignment_words() const { return _allocation_alignment_words; } + size_t allocation_alignment_bytes() const { return allocation_alignment_words() * BytesPerWord; } + // Allocate memory from Metaspace. - // 1) Attempt to allocate from the dictionary of deallocated blocks. - // 2) Attempt to allocate from the current chunk. - // 3) Attempt to enlarge the current chunk in place if it is too small. - // 4) Attempt to get a new chunk and allocate from that chunk. - // At any point, if we hit a commit limit, we return null. - MetaWord* allocate(size_t word_size); + // On success, returns non-empty block of the specified word size, and + // possibly a wastage block that is the result of alignment operations. + // On failure, returns an empty block. Failure may happen if we hit a + // commit limit. + MetaBlock allocate(size_t word_size, MetaBlock& wastage); // Prematurely returns a metaspace allocation to the _block_freelists because it is not // needed anymore. - void deallocate(MetaWord* p, size_t word_size); + void deallocate(MetaBlock bl); // Update statistics. This walks all in-use chunks. void add_to_statistics(ArenaStats* out) const; @@ -161,6 +166,8 @@ public: void print_on(outputStream* st) const; + // Returns true if the given block is contained in this arena + DEBUG_ONLY(bool contains(MetaBlock bl) const;) }; } // namespace metaspace diff --git a/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp index 511f4e6e092..d296ffd6cd7 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -42,13 +42,11 @@ namespace metaspace { // correctly. We currently don't hold members with a larger alignment requirement // than 64-bit inside MetaData, so 8-byte alignment is enough. // -// Klass* structures need to be aligned to KlassAlignmentInBytes, but since that is -// 64-bit, we don't need special handling for allocating Klass*. +// Klass* structures need to be aligned to Klass* alignment, // // On 64-bit platforms, we align to word size; on 32-bit, we align to two words. static const size_t AllocationAlignmentByteSize = 8; -STATIC_ASSERT(AllocationAlignmentByteSize == (size_t)KlassAlignmentInBytes); static const size_t AllocationAlignmentWordSize = AllocationAlignmentByteSize / BytesPerWord; diff --git a/src/hotspot/share/memory/metaspace/metaspaceContext.cpp b/src/hotspot/share/memory/metaspace/metaspaceContext.cpp index e2a2d752d64..b43f4cd3b15 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceContext.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceContext.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. 
All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -75,6 +75,18 @@ void MetaspaceContext::print_on(outputStream* st) const { _cm->print_on(st); } +size_t MetaspaceContext::used_words() const { + return _used_words_counter.get(); +} + +size_t MetaspaceContext::committed_words() const { + return _vslist->committed_words(); +} + +size_t MetaspaceContext::reserved_words() const { + return _vslist->reserved_words(); +} + #ifdef ASSERT void MetaspaceContext::verify() const { _vslist->verify(); diff --git a/src/hotspot/share/memory/metaspace/metaspaceContext.hpp b/src/hotspot/share/memory/metaspace/metaspaceContext.hpp index be1d6f356ae..c773c6385b3 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceContext.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceContext.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,6 +27,7 @@ #define SHARE_MEMORY_METASPACE_METASPACECONTEXT_HPP #include "memory/allocation.hpp" +#include "memory/metaspace/counters.hpp" #include "memory/virtualspace.hpp" #include "utilities/debug.hpp" @@ -61,6 +62,7 @@ class MetaspaceContext : public CHeapObj { const char* const _name; VirtualSpaceList* const _vslist; ChunkManager* const _cm; + SizeAtomicCounter _used_words_counter; MetaspaceContext(const char* name, VirtualSpaceList* vslist, ChunkManager* cm) : _name(name), @@ -78,8 +80,9 @@ public: // untouched, otherwise all memory is unmapped. ~MetaspaceContext(); - VirtualSpaceList* vslist() { return _vslist; } - ChunkManager* cm() { return _cm; } + VirtualSpaceList* vslist() { return _vslist; } + ChunkManager* cm() { return _cm; } + SizeAtomicCounter* used_words_counter() { return &_used_words_counter; } // Create a new, empty, expandable metaspace context. static MetaspaceContext* create_expandable_context(const char* name, CommitLimiter* commit_limiter); @@ -103,6 +106,9 @@ public: // null otherwise. static MetaspaceContext* context_class() { return _class_space_context; } + size_t used_words() const; + size_t committed_words() const; + size_t reserved_words() const; }; } // end namespace diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp index 3cd9ba5ab87..cbd2400444f 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp @@ -39,6 +39,7 @@ #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspace/virtualSpaceList.hpp" #include "memory/metaspaceUtils.hpp" +#include "oops/compressedKlass.hpp" #include "runtime/os.hpp" namespace metaspace { @@ -117,6 +118,9 @@ static void print_settings(outputStream* out, size_t scale) { out->cr(); out->print_cr("CDS: %s", (CDSConfig::is_using_archive() ? "on" : (CDSConfig::is_dumping_static_archive() ? "dump" : "off"))); Settings::print_on(out); +#ifdef _LP64 + CompressedKlassPointers::print_mode(out); +#endif } // This will print out a basic metaspace usage report but @@ -324,7 +328,7 @@ void MetaspaceReporter::print_report(outputStream* out, size_t scale, int flags) // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace. 
const size_t committed_words = RunningCounters::committed_words(); - out->print("(percentages refer to total committed size "); + out->print(" (percentages refer to total committed size "); print_scaled_words(out, committed_words, scale); out->print_cr("):"); diff --git a/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp index 2c32cbe38b1..32329831e7c 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceStatistics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2018, 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -186,16 +186,12 @@ void ArenaStats::print_on(outputStream* st, size_t scale, bool detailed) const } #ifdef ASSERT - void ArenaStats::verify() const { size_t total_used = 0; for (chunklevel_t l = chunklevel::LOWEST_CHUNK_LEVEL; l <= chunklevel::HIGHEST_CHUNK_LEVEL; l++) { _stats[l].verify(); total_used += _stats[l]._used_words; } - // Deallocated allocations still count as used - assert(total_used >= _free_blocks_word_size, - "Sanity"); } #endif diff --git a/src/hotspot/share/memory/metaspace/runningCounters.cpp b/src/hotspot/share/memory/metaspace/runningCounters.cpp index 323862ac02f..75fc4b9792c 100644 --- a/src/hotspot/share/memory/metaspace/runningCounters.cpp +++ b/src/hotspot/share/memory/metaspace/runningCounters.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -25,15 +25,12 @@ #include "precompiled.hpp" #include "memory/metaspace/chunkManager.hpp" -#include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspace/virtualSpaceList.hpp" namespace metaspace { -SizeAtomicCounter RunningCounters::_used_class_counter; -SizeAtomicCounter RunningCounters::_used_nonclass_counter; - // Return reserved size, in words, for Metaspace size_t RunningCounters::reserved_words() { return reserved_words_class() + reserved_words_nonclass(); @@ -72,11 +69,12 @@ size_t RunningCounters::used_words() { } size_t RunningCounters::used_words_class() { - return _used_class_counter.get(); + const MetaspaceContext* context = MetaspaceContext::context_class(); + return context != nullptr ? context->used_words() : 0; } size_t RunningCounters::used_words_nonclass() { - return _used_nonclass_counter.get(); + return MetaspaceContext::context_nonclass()->used_words(); } // ---- free chunks ----- diff --git a/src/hotspot/share/memory/metaspace/runningCounters.hpp b/src/hotspot/share/memory/metaspace/runningCounters.hpp index ff24ead7ebd..e898a099818 100644 --- a/src/hotspot/share/memory/metaspace/runningCounters.hpp +++ b/src/hotspot/share/memory/metaspace/runningCounters.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -27,17 +27,11 @@ #define SHARE_MEMORY_METASPACE_RUNNINGCOUNTERS_HPP #include "memory/allStatic.hpp" -#include "memory/metaspace/counters.hpp" namespace metaspace { // This class is a convenience interface for accessing global metaspace counters. -class RunningCounters : public AllStatic { - - static SizeAtomicCounter _used_class_counter; - static SizeAtomicCounter _used_nonclass_counter; - -public: +struct RunningCounters : public AllStatic { // ---- virtual memory ----- @@ -65,10 +59,6 @@ public: static size_t free_chunks_words_class(); static size_t free_chunks_words_nonclass(); - // Direct access to the counters. - static SizeAtomicCounter* used_nonclass_counter() { return &_used_nonclass_counter; } - static SizeAtomicCounter* used_class_counter() { return &_used_class_counter; } - }; } // namespace metaspace diff --git a/src/hotspot/share/memory/metaspace/testHelpers.cpp b/src/hotspot/share/memory/metaspace/testHelpers.cpp index 2d1071d77dd..b974f06f243 100644 --- a/src/hotspot/share/memory/metaspace/testHelpers.cpp +++ b/src/hotspot/share/memory/metaspace/testHelpers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -54,12 +54,17 @@ MetaspaceTestArena::~MetaspaceTestArena() { MetaWord* MetaspaceTestArena::allocate(size_t word_size) { MutexLocker fcl(_lock, Mutex::_no_safepoint_check_flag); - return _arena->allocate(word_size); + MetaBlock result, wastage; + result = _arena->allocate(word_size, wastage); + if (wastage.is_nonempty()) { + _arena->deallocate(wastage); + } + return result.base(); } void MetaspaceTestArena::deallocate(MetaWord* p, size_t word_size) { MutexLocker fcl(_lock, Mutex::_no_safepoint_check_flag); - return _arena->deallocate(p, word_size); + _arena->deallocate(MetaBlock(p, word_size)); } ///// MetaspaceTestArea ////// @@ -70,7 +75,6 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit _commit_limit(commit_limit), _context(nullptr), _commit_limiter(commit_limit == 0 ? 
max_uintx : commit_limit), // commit_limit == 0 -> no limit - _used_words_counter(), _rs() { assert(is_aligned(reserve_limit, Metaspace::reserve_alignment_words()), "reserve_limit (" SIZE_FORMAT ") " @@ -103,7 +107,7 @@ MetaspaceTestArena* MetaspaceTestContext::create_arena(Metaspace::MetaspaceType MetaspaceArena* arena = nullptr; { MutexLocker ml(lock, Mutex::_no_safepoint_check_flag); - arena = new MetaspaceArena(_context->cm(), growth_policy, &_used_words_counter, _name); + arena = new MetaspaceArena(_context, growth_policy, Metaspace::min_allocation_alignment_words, _name); } return new MetaspaceTestArena(lock, arena); } @@ -124,5 +128,18 @@ void MetaspaceTestContext::print_on(outputStream* st) const { _context->print_on(st); } +size_t MetaspaceTestContext::used_words() const { + return _context->used_words_counter()->get(); +} + +size_t MetaspaceTestContext::committed_words() const { + assert(_commit_limiter.committed_words() == _context->committed_words(), "Sanity"); + return _context->committed_words(); +} + +size_t MetaspaceTestContext::reserved_words() const { + return _context->reserved_words(); +} + } // namespace metaspace diff --git a/src/hotspot/share/memory/metaspace/testHelpers.hpp b/src/hotspot/share/memory/metaspace/testHelpers.hpp index 669a3f61226..65d5ee47512 100644 --- a/src/hotspot/share/memory/metaspace/testHelpers.hpp +++ b/src/hotspot/share/memory/metaspace/testHelpers.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -75,7 +75,6 @@ class MetaspaceTestContext : public CHeapObj { MetaspaceContext* _context; CommitLimiter _commit_limiter; - SizeAtomicCounter _used_words_counter; // For non-expandable contexts we keep track of the space // and delete it at destruction time. @@ -98,15 +97,16 @@ public: const CommitLimiter& commit_limiter() const { return _commit_limiter; } const VirtualSpaceList& vslist() const { return *(_context->vslist()); } ChunkManager& cm() { return *(_context->cm()); } + MetaspaceContext* context() const { return _context; } // Returns reserve- and commit limit we run the test with (in the real world, // these would be equivalent to CompressedClassSpaceSize resp MaxMetaspaceSize) size_t reserve_limit() const { return _reserve_limit == 0 ? max_uintx : 0; } size_t commit_limit() const { return _commit_limit == 0 ? 
max_uintx : 0; } - // Convenience function to retrieve total committed/used words - size_t used_words() const { return _used_words_counter.get(); } - size_t committed_words() const { return _commit_limiter.committed_words(); } + size_t used_words() const; + size_t committed_words() const; + size_t reserved_words() const; DEBUG_ONLY(void verify() const;) diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 4af9c8b2ef2..6daeb2f1045 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -397,8 +397,13 @@ void Universe::genesis(TRAPS) { HandleMark hm(THREAD); // Explicit null checks are needed if these offsets are not smaller than the page size - assert(oopDesc::klass_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()), - "Klass offset is expected to be less than the page size"); + if (UseCompactObjectHeaders) { + assert(oopDesc::mark_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()), + "Mark offset is expected to be less than the page size"); + } else { + assert(oopDesc::klass_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()), + "Klass offset is expected to be less than the page size"); + } assert(arrayOopDesc::length_offset_in_bytes() < static_cast<intptr_t>(os::vm_page_size()), "Array length offset is expected to be less than the page size"); diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp index 1ca8a9530a4..f0c476a2486 100644 --- a/src/hotspot/share/oops/arrayOop.hpp +++ b/src/hotspot/share/oops/arrayOop.hpp @@ -79,11 +79,11 @@ private: } // The _length field is not declared in C++. It is allocated after the - // declared nonstatic fields in arrayOopDesc if not compressed, otherwise - // it occupies the second half of the _klass field in oopDesc. + // mark-word when using compact headers (+UseCompactObjectHeaders), otherwise + // after the compressed Klass* when running with compressed class-pointers + // (+UseCompressedClassPointers), or else after the full Klass*. static int length_offset_in_bytes() { - return UseCompressedClassPointers ? klass_gap_offset_in_bytes() : - (int)sizeof(arrayOopDesc); + return oopDesc::base_offset_in_bytes(); } // Returns the offset of the first element. diff --git a/src/hotspot/share/oops/compressedKlass.cpp b/src/hotspot/share/oops/compressedKlass.cpp index 02483d25a9a..0cfc0e3c60b 100644 --- a/src/hotspot/share/oops/compressedKlass.cpp +++ b/src/hotspot/share/oops/compressedKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -27,24 +27,128 @@ #include "memory/metaspace.hpp" #include "oops/compressedKlass.inline.hpp" #include "runtime/globals.hpp" +#include "runtime/java.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" -address CompressedKlassPointers::_base = nullptr; -int CompressedKlassPointers::_shift = 0; +int CompressedKlassPointers::_narrow_klass_pointer_bits = -1; +int CompressedKlassPointers::_max_shift = -1; + +address CompressedKlassPointers::_base = (address)-1; +int CompressedKlassPointers::_shift = -1; address CompressedKlassPointers::_klass_range_start = nullptr; address CompressedKlassPointers::_klass_range_end = nullptr; +narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1; +narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1; #ifdef _LP64 -#ifdef ASSERT -void CompressedKlassPointers::assert_is_valid_encoding(address addr, size_t len, address base, int shift) { - assert(base + nth_bit(32 + shift) >= addr + len, "Encoding (base=" PTR_FORMAT ", shift=%d) does not " - "fully cover the class range " PTR_FORMAT "-" PTR_FORMAT, p2i(base), shift, p2i(addr), p2i(addr + len)); +size_t CompressedKlassPointers::max_klass_range_size() { + // We disallow klass range sizes larger than 4GB even if the encoding + // range would allow for a larger Klass range (e.g. Base=zero, shift=3 -> 32GB). + // That is because many CPU-specific compiler decodings do not want the + // shifted narrow Klass to spill over into the third quadrant of the 64-bit target + // address, e.g. to use a 16-bit move for a simplified base addition. + return MIN2(4 * G, max_encoding_range_size()); } + +void CompressedKlassPointers::pre_initialize() { + if (UseCompactObjectHeaders) { + _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh; + _max_shift = max_shift_coh; + } else { + _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh; + _max_shift = max_shift_noncoh; + } +} + +#ifdef ASSERT +void CompressedKlassPointers::sanity_check_after_initialization() { + // In expectation of an assert, prepare condensed info to be printed with the assert. 
+ char tmp[256]; + os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT "," + " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u", + RANGE2FMTARGS(_klass_range_start, _klass_range_end), + p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id); +#define ASSERT_HERE(cond) assert(cond, " (%s)", tmp); +#define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp); + + // All values must be initialized + ASSERT_HERE(_max_shift != -1); + ASSERT_HERE(_klass_range_start != (address)-1); + ASSERT_HERE(_klass_range_end != (address)-1); + ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1); + ASSERT_HERE(_base != (address)-1); + ASSERT_HERE(_shift != -1); + + const size_t klass_align = klass_alignment_in_bytes(); + + // must be aligned enough to hold 64-bit data + ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t))); + + // should be smaller than the minimum metaspace chunk size (soft requirement) + ASSERT_HERE(klass_align <= K); + + ASSERT_HERE(_klass_range_end > _klass_range_start); + + // Check that Klass range is fully engulfed in the encoding range + const address encoding_start = _base; + const address encoding_end = _base + nth_bit(narrow_klass_pointer_bits() + _shift); + ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end, + "Resulting encoding range does not fully cover the class range"); + + // Check that Klass range is aligned to Klass alignment. Note that this should never be + // an issue since the Klass range is handed in by either CDS- or Metaspace-initialization, and + // it should be the result of an mmap operation that operates on page sizes. So as long as + // the Klass alignment is <= page size, we are fine. + ASSERT_HERE_2(is_aligned(_klass_range_start, klass_align) && + is_aligned(_klass_range_end, klass_align), + "Klass range must start and end at a properly aligned address"); + + // Check _lowest_valid_narrow_klass_id and _highest_valid_narrow_klass_id + ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass"); + ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id); + + Klass* const k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift); + if (encoding_start == _klass_range_start) { + ASSERT_HERE_2((address)k1 == _klass_range_start + klass_align, "Not lowest"); + } else { + ASSERT_HERE_2((address)k1 == _klass_range_start, "Not lowest"); + } + narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift); + ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible"); + + Klass* const k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift); + ASSERT_HERE((address)k2 == _klass_range_end - klass_align); + narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift); + ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible"); + +#ifdef AARCH64 + // On aarch64, we never expect a shift value > 0 in standard (non-coh) mode + ASSERT_HERE_2(UseCompactObjectHeaders || _shift == 0, "Shift > 0 in non-coh mode?"); #endif +#undef ASSERT_HERE +#undef ASSERT_HERE_2 +} +#endif // ASSERT + +// Helper function: given current Klass Range, Base and Shift, calculate the lowest and highest values +// of narrowKlass we can expect.
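Before the actual helper below, a minimal standalone sketch of that computation; the base, range size and shift used here are made up purely for illustration (64-bit build assumed), not taken from HotSpot:

#include <cassert>
#include <cstdint>

int main() {
  using narrowKlass = uint32_t;
  const uintptr_t base  = 0x800000000ULL;            // assumed: encoding base == klass range start
  const uintptr_t start = base;
  const uintptr_t end   = start + 128 * 1024 * 1024; // assumed 128 MB klass range
  const int shift = 9;                               // assumed shift -> 512-byte klass alignment
  const uintptr_t klass_align = uintptr_t(1) << shift;

  // narrowKlass 0 means null, so when the range starts at the encoding base the
  // first usable Klass slot is one alignment step above it.
  uintptr_t lowest_loc = start;
  if (lowest_loc == base) lowest_loc += klass_align;
  const narrowKlass lowest_nk  = narrowKlass((lowest_loc - base) >> shift);
  const narrowKlass highest_nk = narrowKlass(((end - klass_align) - base) >> shift);

  assert(lowest_nk == 1);
  assert(highest_nk == (128u * 1024 * 1024 >> shift) - 1);
  return 0;
}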
+void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() { + address lowest_possible_klass_location = _klass_range_start; + + // A Klass will never be placed at the Encoding range start, since that would translate to a narrowKlass=0, which + // is disallowed. Note that both Metaspace and CDS prevent allocation at the first address for this reason. + if (lowest_possible_klass_location == _base) { + lowest_possible_klass_location += klass_alignment_in_bytes(); + } + _lowest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(lowest_possible_klass_location - _base) >> _shift); + + address highest_possible_klass_location = _klass_range_end - klass_alignment_in_bytes(); + _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass_location - _base) >> _shift); +} // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for @@ -52,14 +156,18 @@ void CompressedKlassPointers::assert_is_valid_encoding(address addr, size_t len, void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) { address const end = addr + len; - const int narrow_klasspointer_bits = sizeof(narrowKlass) * 8; - const size_t encoding_range_size = nth_bit(narrow_klasspointer_bits + requested_shift); - address encoding_range_end = requested_base + encoding_range_size; + if (len > max_klass_range_size()) { + stringStream ss; + ss.print("Class space size and CDS archive size combined (%zu) " + "exceed the maximum possible size (%zu)", + len, max_klass_range_size()); + vm_exit_during_initialization(ss.base()); + } - // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call - // this function from CDS, and therefore know this to be true. + // Note: While it would be technically valid for the encoding base to precede the start of the Klass range, + // we never do this here. This is used at CDS runtime to re-instate the scheme used to precompute the + // narrow Klass IDs in the archive, and the requested base should point to the start of the Klass range.
assert(requested_base == addr, "Invalid requested base"); - assert(encoding_range_end >= end, "Encoding does not cover the full Klass range"); // Remember Klass range: _klass_range_start = addr; @@ -68,7 +176,9 @@ void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t _base = requested_base; _shift = requested_shift; - DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);) + calc_lowest_highest_narrow_klass_id(); + + DEBUG_ONLY(sanity_check_after_initialization();) } char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) { @@ -77,60 +187,105 @@ char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t } char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) { - return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr); + const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); + return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr); } char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) { - return reserve_address_space_X(nth_bit(32), nth_bit(32 + LogKlassAlignmentInBytes), size, Metaspace::reserve_alignment(), aslr); + const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); + const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); + return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr); } char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) { return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr); } -#if !defined(AARCH64) || defined(ZERO) -// On aarch64 we have an own version; all other platforms use the default version void CompressedKlassPointers::initialize(address addr, size_t len) { + if (len > max_klass_range_size()) { + stringStream ss; + ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)", + len, max_klass_range_size()); + vm_exit_during_initialization(ss.base()); + } + // Remember the Klass range: _klass_range_start = addr; _klass_range_end = addr + len; - // The default version of this code tries, in order of preference: - // -unscaled (base=0 shift=0) - // -zero-based (base=0 shift>0) - // -nonzero-base (base>0 shift=0) - // Note that base>0 shift>0 should never be needed, since the klass range will - // never exceed 4GB. - constexpr uintptr_t unscaled_max = nth_bit(32); - assert(len <= unscaled_max, "Klass range larger than 32 bits?"); + // Calculate Base and Shift: - constexpr uintptr_t zerobased_max = nth_bit(32 + LogKlassAlignmentInBytes); + if (UseCompactObjectHeaders) { - address const end = addr + len; - if (end <= (address)unscaled_max) { - _base = nullptr; - _shift = 0; - } else { - if (end <= (address)zerobased_max) { - _base = nullptr; - _shift = LogKlassAlignmentInBytes; - } else { - _base = addr; - _shift = 0; + // In compact object header mode, with 22-bit narrowKlass, we don't attempt for + // zero-based mode. Instead, we set the base to the start of the klass range and + // then try for the smallest shift possible that still covers the whole range. + // The reason is that we want to avoid, if possible, shifts larger than + // a cacheline size. 
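The assignment and shift-selection loop implementing this follow in the hunk below. As a standalone illustration of the shifts that selection produces on a 64-bit build (22-bit narrowKlass, max shift 10 and a 64-byte cache line are assumed here; choose_shift is a hypothetical stand-in, not HotSpot code):

#include <cstdio>
#include <cstddef>

static int choose_shift(size_t range_len) {
  const int nk_bits = 22, max_shift = 10, log_cacheline = 6;
  int s = max_shift;
  // Shrink the shift as long as half of the current coverage still exceeds the
  // range and we stay at or above cache-line granularity.
  while (s > log_cacheline && (size_t(1) << (nk_bits + s - 1)) > range_len) {
    s--;
  }
  return s;
}

int main() {
  printf("128M -> shift %d\n", choose_shift(size_t(128) << 20)); // 6 (cache-line floor)
  printf("  1G -> shift %d\n", choose_shift(size_t(1) << 30));   // 9
  printf("  4G -> shift %d\n", choose_shift(size_t(4) << 30));   // 10
  return 0;
}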
+ _base = addr; + + const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE); + int s = max_shift(); + while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) { + s--; } + _shift = s; + + } else { + + // Traditional (non-compact) header mode + const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); + const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); + +#ifdef AARCH64 + // Aarch64 avoids zero-base shifted mode (_base=0 _shift>0), instead prefers + // non-zero-based mode with a zero shift. + _shift = 0; + address const end = addr + len; + _base = (end <= (address)unscaled_max) ? nullptr : addr; +#else + // We try, in order of preference: + // -unscaled (base=0 shift=0) + // -zero-based (base=0 shift>0) + // -nonzero-base (base>0 shift=0) + // Note that base>0 shift>0 should never be needed, since the klass range will + // never exceed 4GB. + address const end = addr + len; + if (end <= (address)unscaled_max) { + _base = nullptr; + _shift = 0; + } else { + if (end <= (address)zerobased_max) { + _base = nullptr; + _shift = max_shift(); + } else { + _base = addr; + _shift = 0; + } + } +#endif // AARCH64 } - DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);) + calc_lowest_highest_narrow_klass_id(); + +#ifdef ASSERT + sanity_check_after_initialization(); +#endif } -#endif // !AARCH64 || ZERO void CompressedKlassPointers::print_mode(outputStream* st) { + st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d", + UseCompressedClassPointers, UseCompactObjectHeaders); if (UseCompressedClassPointers) { + st->print_cr("Narrow klass pointer bits %d, Max shift %d", + _narrow_klass_pointer_bits, _max_shift); st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", p2i(base()), shift()); st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end())); st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end)); + st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1, + _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id); } else { st->print_cr("UseCompressedClassPointers off"); } diff --git a/src/hotspot/share/oops/compressedKlass.hpp b/src/hotspot/share/oops/compressedKlass.hpp index 8f89b0550ff..9e3a09d73b9 100644 --- a/src/hotspot/share/oops/compressedKlass.hpp +++ b/src/hotspot/share/oops/compressedKlass.hpp @@ -26,6 +26,7 @@ #define SHARE_OOPS_COMPRESSEDKLASS_HPP #include "memory/allStatic.hpp" +#include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" class outputStream; @@ -97,42 +98,98 @@ class Klass; // If compressed klass pointers then use narrowKlass. typedef juint narrowKlass; -const int LogKlassAlignmentInBytes = 3; -const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes; - -// Maximal size of compressed class space. Above this limit compression is not possible. -// Also upper bound for placement of zero based class space. (Class space is further limited -// to be < 3G, see arguments.cpp.) -const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; - // For UseCompressedClassPointers. class CompressedKlassPointers : public AllStatic { friend class VMStructs; friend class ArchiveBuilder; + // We use a different narrow Klass pointer geometry depending on + // whether we run in standard mode or in compact-object-header-mode. 
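The constants declared in the next hunk pin down these two geometries; a quick standalone check of the encoding ranges they imply (the additional 4 GB cap on the klass range itself comes from max_klass_range_size(); this is an illustrative aside, not HotSpot code):

#include <cstdio>
#include <cstdint>

int main() {
  const uint64_t G = 1024ULL * 1024 * 1024;
  // Compact headers: 22-bit narrowKlass, max shift 10 -> 4 GB encoding range.
  printf("coh     : %llu GB\n", (unsigned long long)((1ULL << (22 + 10)) / G));
  // Legacy headers: 32-bit narrowKlass, max shift 3 -> 32 GB encoding range.
  printf("non-coh : %llu GB\n", (unsigned long long)((1ULL << (32 + 3)) / G));
  return 0;
}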
+ + // Narrow klass pointer bits for an unshifted narrow Klass pointer. + static constexpr int narrow_klass_pointer_bits_noncoh = 32; + static constexpr int narrow_klass_pointer_bits_coh = 22; + + // Bit size of a narrowKlass + static int _narrow_klass_pointer_bits; + + // The maximum shift values we can use depending on UseCompactObjectHeaders + static constexpr int max_shift_noncoh = 3; + static constexpr int max_shift_coh = 10; + + // Maximum shift usable + static int _max_shift; + + // Encoding Base, Encoding Shift static address _base; static int _shift; // Start and end of the Klass Range. - // Note: guaranteed to be aligned to KlassAlignmentInBytes + // Note: guaranteed to be aligned to 1<<shift static address _klass_range_start; static address _klass_range_end; + + static narrowKlass _lowest_valid_narrow_klass_id; + static narrowKlass _highest_valid_narrow_klass_id; + + static void calc_lowest_highest_narrow_klass_id(); + DEBUG_ONLY(static void sanity_check_after_initialization();) + + template <typename T> + static inline void check_init(T var) { + assert(var != (T)-1, "Not yet initialized"); + } static inline Klass* decode_not_null_without_asserts(narrowKlass v, address base, int shift); - static inline Klass* decode_not_null(narrowKlass v, address base, int shift); - - static inline narrowKlass encode_not_null(Klass* v, address base, int shift); public: + // Initialization sequence: + // 1) Parse arguments. The following arguments take a role: + // - UseCompressedClassPointers + // - UseCompactObjectHeaders + // - Xshare on off dump + // - CompressedClassSpaceSize + // 2) call pre_initialize(): depending on UseCompactObjectHeaders, defines the limits of narrow Klass pointer + // geometry (how many bits, the max. possible shift) + // 3) .. from here on, narrow_klass_pointer_bits() and max_shift() can be used + // 4) call reserve_address_space_for_compressed_classes() either from CDS initialization or, if CDS is off, + // from metaspace initialization. Reserves space for class space + CDS, attempts to reserve such that + // we later can use a "good" encoding scheme. Reservation is highly CPU-specific. + // 5) Initialize the narrow Klass encoding scheme by determining encoding base and shift: + // 5a) if CDS=on: Calls initialize_for_given_encoding() with the reservation base from step (4) and the + // CDS-intrinsic setting for shift; here, we don't have any freedom to deviate from the base. + // 5b) if CDS=off: Calls initialize() - here, we have more freedom and, if we want, can choose an encoding + // base that differs from the reservation base from step (4). That allows us, e.g., to later use + // zero-based encoding. + // 6) ... from now on, we can use base() and shift(). + + // Called right after argument parsing; defines narrow klass pointer geometry limits + static void pre_initialize(); + + // The number of bits a narrow Klass pointer has; + static int narrow_klass_pointer_bits() { check_init(_narrow_klass_pointer_bits); return _narrow_klass_pointer_bits; } + + // The maximum possible shift; the actual shift employed later can be smaller (see initialize()) + static int max_shift() { check_init(_max_shift); return _max_shift; } + + // Returns the maximum encoding range, given the current geometry (narrow klass bit size and shift) + static size_t max_encoding_range_size() { return nth_bit(narrow_klass_pointer_bits() + max_shift()); } + + // Returns the maximum allowed klass range size. + static size_t max_klass_range_size(); + // Reserve a range of memory that is to contain Klass structures which are referenced by narrow Klass IDs. // If optimize_for_zero_base is true, the implementation will attempt to reserve optimized for zero-based encoding.
static char* reserve_address_space_for_compressed_classes(size_t size, bool aslr, bool optimize_for_zero_base); @@ -152,33 +209,55 @@ public: static void print_mode(outputStream* st); - static address base() { return _base; } - static int shift() { return _shift; } + // Can only be used after initialization + static address base() { check_init(_base); return _base; } + static int shift() { check_init(_shift); return _shift; } static address klass_range_start() { return _klass_range_start; } static address klass_range_end() { return _klass_range_end; } static inline address encoding_range_end(); + // Returns the alignment a Klass* is guaranteed to have. + // Note: *Not* the same as 1 << shift ! Klass are always guaranteed to be at least 64-bit aligned, + // so this will return 8 even if shift is 0. + static int klass_alignment_in_bytes() { return nth_bit(MAX2(3, _shift)); } + static int klass_alignment_in_words() { return klass_alignment_in_bytes() / BytesPerWord; } + + // Returns the highest possible narrowKlass value given the current Klass range + static narrowKlass highest_valid_narrow_klass_id() { return _highest_valid_narrow_klass_id; } + static bool is_null(Klass* v) { return v == nullptr; } static bool is_null(narrowKlass v) { return v == 0; } // Versions without asserts - static inline Klass* decode_not_null_without_asserts(narrowKlass v); static inline Klass* decode_without_asserts(narrowKlass v); - static inline Klass* decode_not_null(narrowKlass v); static inline Klass* decode(narrowKlass v); + static inline narrowKlass encode_not_null_without_asserts(Klass* k, address narrow_base, int shift); static inline narrowKlass encode_not_null(Klass* v); static inline narrowKlass encode(Klass* v); +#ifdef ASSERT + // Given an address, check that it can be encoded with the current encoding + inline static void check_encodable(const void* addr); + // Given a narrow Klass ID, check that it is valid according to current encoding + inline static void check_valid_narrow_klass_id(narrowKlass nk); +#endif + // Returns whether the pointer is in the memory region used for encoding compressed // class pointers. This includes CDS. - static inline bool is_encodable(const void* p) { - return (address) p >= _klass_range_start && - (address) p < _klass_range_end; + static inline bool is_encodable(const void* addr) { + // An address can only be encoded if: + // + // 1) the address lies within the klass range. + // 2) It is suitably aligned to 2^encoding_shift. This only really matters for + // +UseCompactObjectHeaders, since the encoding shift can be large (max 10 bits -> 1KB). + return (address)addr >= _klass_range_start && (address)addr < _klass_range_end && + is_aligned(addr, klass_alignment_in_bytes()); } + }; #endif // SHARE_OOPS_COMPRESSEDKLASS_HPP diff --git a/src/hotspot/share/oops/compressedKlass.inline.hpp b/src/hotspot/share/oops/compressedKlass.inline.hpp index c9c9af24ad8..7c5da48a494 100644 --- a/src/hotspot/share/oops/compressedKlass.inline.hpp +++ b/src/hotspot/share/oops/compressedKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,42 +32,24 @@ #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" -static inline bool check_alignment(Klass* v) { - return (intptr_t)v % KlassAlignmentInBytes == 0; +inline Klass* CompressedKlassPointers::decode_not_null_without_asserts(narrowKlass v, address narrow_base, int shift) { + return (Klass*)((uintptr_t)narrow_base +((uintptr_t)v << shift)); } -inline Klass* CompressedKlassPointers::decode_not_null_without_asserts(narrowKlass v, address narrow_base, int shift) { - return (Klass*)((uintptr_t)narrow_base +((uintptr_t)v << shift)); -} - -inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address narrow_base, int shift) { - assert(!is_null(v), "narrow klass value can never be zero"); - Klass* result = decode_not_null_without_asserts(v, narrow_base, shift); - assert(check_alignment(result), "address not aligned: " PTR_FORMAT, p2i(result)); - return result; -} - -inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v, address narrow_base, int shift) { - assert(!is_null(v), "klass value can never be zero"); - assert(check_alignment(v), "Address not aligned"); - uint64_t pd = (uint64_t)(pointer_delta(v, narrow_base, 1)); - assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding (Klass " PTR_FORMAT ", Base " PTR_FORMAT ")", p2i(v), p2i(narrow_base)); - uint64_t result = pd >> shift; - assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow"); - assert(decode_not_null((narrowKlass)result, narrow_base, shift) == v, "reversibility"); - return (narrowKlass)result; -} - -inline Klass* CompressedKlassPointers::decode_not_null_without_asserts(narrowKlass v) { - return decode_not_null_without_asserts(v, base(), shift()); +inline narrowKlass CompressedKlassPointers::encode_not_null_without_asserts(Klass* k, address narrow_base, int shift) { + return (narrowKlass)(pointer_delta(k, narrow_base, 1) >> shift); } inline Klass* CompressedKlassPointers::decode_without_asserts(narrowKlass v) { - return is_null(v) ? nullptr : decode_not_null_without_asserts(v); + return is_null(v) ? nullptr : decode_not_null_without_asserts(v, base(), shift()); } inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v) { - return decode_not_null(v, base(), shift()); + assert(!is_null(v), "narrow klass value can never be zero"); + DEBUG_ONLY(check_valid_narrow_klass_id(v);) + Klass* const k = decode_not_null_without_asserts(v, base(), shift()); + DEBUG_ONLY(check_encodable(k)); + return k; } inline Klass* CompressedKlassPointers::decode(narrowKlass v) { @@ -75,15 +57,40 @@ inline Klass* CompressedKlassPointers::decode(narrowKlass v) { } inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) { - return encode_not_null(v, base(), shift()); + assert(!is_null(v), "klass value can never be zero"); + DEBUG_ONLY(check_encodable(v);) + const narrowKlass nk = encode_not_null_without_asserts(v, base(), shift()); + assert(decode_not_null_without_asserts(nk, base(), shift()) == v, "reversibility"); + DEBUG_ONLY(check_valid_narrow_klass_id(nk);) + return nk; } inline narrowKlass CompressedKlassPointers::encode(Klass* v) { return is_null(v) ?
(narrowKlass)0 : encode_not_null(v); } +#ifdef ASSERT +inline void CompressedKlassPointers::check_encodable(const void* addr) { + assert(UseCompressedClassPointers, "Only call for +UseCCP"); + assert(addr != nullptr, "Null Klass?"); + assert(is_encodable(addr), + "Address " PTR_FORMAT " is not encodable (Klass range: " RANGEFMT ", klass alignment: %d)", + p2i(addr), RANGE2FMTARGS(_klass_range_start, _klass_range_end), klass_alignment_in_bytes()); +} + +inline void CompressedKlassPointers::check_valid_narrow_klass_id(narrowKlass nk) { + check_init(_base); + assert(UseCompressedClassPointers, "Only call for +UseCCP"); + assert(nk > 0, "narrow Klass ID is 0"); + const uint64_t nk_mask = ~right_n_bits(narrow_klass_pointer_bits()); + assert(((uint64_t)nk & nk_mask) == 0, "narrow klass id bit spillover (%u)", nk); + assert(nk >= _lowest_valid_narrow_klass_id && + nk <= _highest_valid_narrow_klass_id, "narrowKlass ID out of range (%u)", nk); +} +#endif // ASSERT + inline address CompressedKlassPointers::encoding_range_end() { - const int max_bits = (sizeof(narrowKlass) * BitsPerByte) + _shift; // narrowKlass are 32 bit + const int max_bits = narrow_klass_pointer_bits() + _shift; return _base + nth_bit(max_bits); } diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 46a6e7719f3..990637c2951 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -455,7 +455,7 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par assert(loader_data != nullptr, "invariant"); InstanceKlass* ik; - const bool use_class_space = !parser.is_interface() && !parser.is_abstract(); + const bool use_class_space = parser.klass_needs_narrow_id(); // Allocation if (parser.is_instance_ref_klass()) { @@ -475,6 +475,11 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par ik = new (loader_data, size, use_class_space, THREAD) InstanceKlass(parser); } + if (ik != nullptr && UseCompressedClassPointers && use_class_space) { + assert(CompressedKlassPointers::is_encodable(ik), + "Klass " PTR_FORMAT "needs a narrow Klass ID, but is not encodable", p2i(ik)); + } + // Check for pending exception before adding to the loader data and incrementing // class count. Can get OOM here. if (HAS_PENDING_EXCEPTION) { diff --git a/src/hotspot/share/oops/instanceOop.hpp b/src/hotspot/share/oops/instanceOop.hpp index 8de3b1a742c..e97cd00f79f 100644 --- a/src/hotspot/share/oops/instanceOop.hpp +++ b/src/hotspot/share/oops/instanceOop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,17 +32,6 @@ // Evaluating "new HashTable()" will create an instanceOop. class instanceOopDesc : public oopDesc { - public: - // aligned header size. - static int header_size() { return sizeof(instanceOopDesc)/HeapWordSize; } - - // If compressed, the offset of the fields of the instance may not be aligned. - static int base_offset_in_bytes() { - return (UseCompressedClassPointers) ? - klass_gap_offset_in_bytes() : - sizeof(instanceOopDesc); - - } }; // See similar requirement for oopDesc. 
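Stepping back from the compressedKlass.inline.hpp hunks above: the *_without_asserts helpers boil down to a subtract-and-shift. A tiny standalone round trip with hypothetical base/shift values (the encode/decode functions here are simplified stand-ins, not the HotSpot implementations):

#include <cassert>
#include <cstdint>

using narrowKlass = uint32_t;

static narrowKlass encode(uintptr_t klass_addr, uintptr_t base, int shift) {
  return narrowKlass((klass_addr - base) >> shift);
}
static uintptr_t decode(narrowKlass nk, uintptr_t base, int shift) {
  return base + (uintptr_t(nk) << shift);
}

int main() {
  const uintptr_t base  = 0x800000000ULL;  // assumed encoding base
  const int shift       = 9;               // assumed shift (512-byte klass alignment)
  const uintptr_t klass = base + 42 * 512; // some properly aligned "Klass*" in the range
  const narrowKlass nk  = encode(klass, base, shift);
  assert(nk == 42);
  assert(decode(nk, base, shift) == klass); // reversible only because klass is 512-byte aligned
  return 0;
}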
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index b87f341ce48..884816764a0 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -43,6 +43,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" @@ -274,6 +275,23 @@ Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signatur return nullptr; } +static markWord make_prototype(const Klass* kls) { + markWord prototype = markWord::prototype(); +#ifdef _LP64 + if (UseCompactObjectHeaders) { + // With compact object headers, the narrow Klass ID is part of the mark word. + // We therefore seed the mark word with the narrow Klass ID. + // Note that only those Klasses that can be instantiated have a narrow Klass ID. + // For those that don't, we leave the klass bits empty and assert if someone + // tries to use those. + const narrowKlass nk = CompressedKlassPointers::is_encodable(kls) ? + CompressedKlassPointers::encode(const_cast<Klass*>(kls)) : 0; + prototype = prototype.set_narrow_klass(nk); + } +#endif + return prototype; +} + Klass::Klass() : _kind(UnknownKlassKind) { assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for cds"); } @@ -283,6 +301,7 @@ Klass::Klass() : _kind(UnknownKlassKind) { // The constructor is also used from CppVtableCloner, // which doesn't zero out the memory before calling the constructor. Klass::Klass(KlassKind kind) : _kind(kind), + _prototype_header(make_prototype(this)), + _shared_class_path_index(-1) { CDS_ONLY(_shared_class_flags = 0;) CDS_JAVA_HEAP_ONLY(_archived_mirror_index = -1;) @@ -985,6 +1004,10 @@ void Klass::oop_print_on(oop obj, outputStream* st) { // print header obj->mark().print_on(st); st->cr(); + if (UseCompactObjectHeaders) { + st->print(BULLET"prototype_header: " INTPTR_FORMAT, _prototype_header.value()); + st->cr(); + } } // print class @@ -1007,7 +1030,14 @@ void Klass::verify_on(outputStream* st) { // This can be expensive, but it is worth checking that this klass is actually // in the CLD graph but not in production. - assert(Metaspace::contains((address)this), "Should be"); +#ifdef ASSERT + if (UseCompressedClassPointers && needs_narrow_id()) { + // Stricter checks for both correct alignment and placement + CompressedKlassPointers::check_encodable(this); + } else { + assert(Metaspace::contains((address)this), "Should be"); + } +#endif // ASSERT guarantee(this->is_klass(),"should be klass"); @@ -1035,6 +1065,8 @@ void Klass::oop_verify_on(oop obj, outputStream* st) { guarantee(obj->klass()->is_klass(), "klass field is not a klass"); } +// Note: this function is called with an address that may or may not be a Klass. +// The point is not to assert it is but to check if it could be. bool Klass::is_valid(Klass* k) { if (!is_aligned(k, sizeof(MetaWord))) return false; if ((size_t)k < os::min_page_size()) return false; diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index e5447ed41ee..2629be64bea 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -165,6 +165,8 @@ class Klass : public Metadata { uintx _secondary_supers_bitmap; uint8_t _hash_slot; + markWord _prototype_header; // Used to initialize objects' header + int _vtable_len; // vtable length.
This field may be read very often when we // have lots of itable dispatches (e.g., lambdas and streams). // Keep it away from the beginning of a Klass to avoid cacheline @@ -707,6 +709,10 @@ public: bool is_cloneable() const; void set_is_cloneable(); + inline markWord prototype_header() const; + inline void set_prototype_header(markWord header); + static ByteSize prototype_header_offset() { return in_ByteSize(offset_of(Klass, _prototype_header)); } + JFR_ONLY(DEFINE_TRACE_ID_METHODS;) virtual void metaspace_pointers_do(MetaspaceClosure* iter); @@ -761,6 +767,10 @@ public: static bool is_valid(Klass* k); static void on_secondary_supers_verification_failure(Klass* super, Klass* sub, bool linear_result, bool table_result, const char* msg); + + // Returns true if this Klass needs to be addressable via narrow Klass ID. + inline bool needs_narrow_id() const; + }; #endif // SHARE_OOPS_KLASS_HPP diff --git a/src/hotspot/share/oops/klass.inline.hpp b/src/hotspot/share/oops/klass.inline.hpp index 828c5697da2..4f9401f1709 100644 --- a/src/hotspot/share/oops/klass.inline.hpp +++ b/src/hotspot/share/oops/klass.inline.hpp @@ -59,6 +59,24 @@ inline bool Klass::is_loader_alive() const { return class_loader_data()->is_alive(); } +inline markWord Klass::prototype_header() const { + assert(UseCompactObjectHeaders, "only use with compact object headers"); +#ifdef _LP64 + // You only need prototypes for allocating objects. If the class is not instantiable, it won't live in + // class space and have no narrow Klass ID. But in that case we should not need the prototype. + assert(_prototype_header.narrow_klass() > 0, "Klass " PTR_FORMAT ": invalid prototype (" PTR_FORMAT ")", + p2i(this), _prototype_header.value()); +#endif + return _prototype_header; +} + +// This is only used when dumping the archive. In other cases, +// the _prototype_header is already initialized to the right thing. +inline void Klass::set_prototype_header(markWord header) { + assert(UseCompactObjectHeaders, "only with compact headers"); + _prototype_header = header; +} + // Loading the java_mirror does not keep its holder alive. See Klass::keep_alive(). inline oop Klass::java_mirror() const { return _java_mirror.resolve(); @@ -152,4 +170,13 @@ inline bool Klass::search_secondary_supers(Klass *k) const { return result; } +// Returns true if this Klass needs to be addressable via narrow Klass ID. +inline bool Klass::needs_narrow_id() const { + // Classes that are never instantiated need no narrow Klass Id, since the + // only point of having a narrow id is to put it into an object header. Keeping + // never instantiated classes out of class space lessens the class space pressure. + // For more details, see JDK-8338526. + // Note: don't call this function before access flags are initialized. 
+ return !is_abstract() && !is_interface(); +} #endif // SHARE_OOPS_KLASS_INLINE_HPP diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp index 2bbec570fa8..a9b1a7b026a 100644 --- a/src/hotspot/share/oops/markWord.cpp +++ b/src/hotspot/share/oops/markWord.cpp @@ -29,6 +29,12 @@ #include "runtime/objectMonitor.inline.hpp" #include "utilities/ostream.hpp" +#ifdef _LP64 +STATIC_ASSERT(markWord::klass_shift + markWord::klass_bits == 64); +// The hash (preceding klass bits) shall be a direct neighbor but not interleave +STATIC_ASSERT(markWord::klass_shift == markWord::hash_bits + markWord::hash_shift); +#endif + markWord markWord::displaced_mark_helper() const { assert(has_displaced_mark_helper(), "check"); if (has_monitor()) { diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index bbd80d02cbd..7d2bff1efc0 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -26,6 +26,7 @@ #define SHARE_OOPS_MARKWORD_HPP #include "metaprogramming/primitiveConversions.hpp" +#include "oops/compressedKlass.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/globals.hpp" @@ -37,11 +38,15 @@ // // 32 bits: // -------- -// hash:25 ------------>| age:4 unused_gap:1 lock:2 (normal object) +// hash:25 ------------>| age:4 self-fwd:1 lock:2 (normal object) // // 64 bits: // -------- -// unused:25 hash:31 -->| unused_gap:1 age:4 unused_gap:1 lock:2 (normal object) +// unused:22 hash:31 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object) +// +// 64 bits (with compact headers): +// ------------------------------- +// klass:22 hash:31 -->| unused_gap:4 age:4 self-fwd:1 lock:2 (normal object) // // - hash contains the identity hash value: largest value is // 31 bits, see os::random(). Also, 64-bit vm's require @@ -104,22 +109,37 @@ class markWord { // Constants static const int age_bits = 4; static const int lock_bits = 2; - static const int first_unused_gap_bits = 1; - static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - first_unused_gap_bits; + static const int self_fwd_bits = 1; + static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - self_fwd_bits; static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits; - static const int second_unused_gap_bits = LP64_ONLY(1) NOT_LP64(0); + static const int unused_gap_bits = LP64_ONLY(4) NOT_LP64(0); // Reserved for Valhalla. static const int lock_shift = 0; - static const int age_shift = lock_bits + first_unused_gap_bits; - static const int hash_shift = age_shift + age_bits + second_unused_gap_bits; + static const int self_fwd_shift = lock_shift + lock_bits; + static const int age_shift = self_fwd_shift + self_fwd_bits; + static const int hash_shift = age_shift + age_bits + unused_gap_bits; static const uintptr_t lock_mask = right_n_bits(lock_bits); static const uintptr_t lock_mask_in_place = lock_mask << lock_shift; + static const uintptr_t self_fwd_mask = right_n_bits(self_fwd_bits); + static const uintptr_t self_fwd_mask_in_place = self_fwd_mask << self_fwd_shift; static const uintptr_t age_mask = right_n_bits(age_bits); static const uintptr_t age_mask_in_place = age_mask << age_shift; static const uintptr_t hash_mask = right_n_bits(hash_bits); static const uintptr_t hash_mask_in_place = hash_mask << hash_shift; +#ifdef _LP64 + // Used only with compact headers: + // We store the (narrow) Klass* in the bits 43 to 64. 
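A standalone check that the bit budget in the mark-word layout comment above adds up to 64, and that the klass field therefore starts at bit 42 (bits 43..64 in the 1-based wording above); the precise HotSpot constants follow right below, this is only an illustrative recomputation:

#include <cstdio>

int main() {
  const int lock_bits = 2, self_fwd_bits = 1, age_bits = 4, unused_gap_bits = 4;
  const int hash_bits = 31, klass_bits = 22;
  const int hash_shift  = lock_bits + self_fwd_bits + age_bits + unused_gap_bits; // 11
  const int klass_shift = hash_shift + hash_bits;                                 // 42
  printf("hash_shift=%d klass_shift=%d total=%d\n",
         hash_shift, klass_shift, klass_shift + klass_bits);                      // 11 42 64
  return 0;
}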
+ + // These are for bit-precise extraction of the narrow Klass* from the 64-bit Markword + static constexpr int klass_shift = hash_shift + hash_bits; + static constexpr int klass_bits = 22; + static constexpr uintptr_t klass_mask = right_n_bits(klass_bits); + static constexpr uintptr_t klass_mask_in_place = klass_mask << klass_shift; +#endif + + static const uintptr_t locked_value = 0; static const uintptr_t unlocked_value = 1; static const uintptr_t monitor_value = 2; @@ -144,8 +164,9 @@ class markWord { bool is_marked() const { return (mask_bits(value(), lock_mask_in_place) == marked_value); } - bool is_forwarded() const { - return (mask_bits(value(), lock_mask_in_place) == marked_value); + bool is_forwarded() const { + // Returns true for normal forwarded (0b011) and self-forwarded (0b1xx). + return mask_bits(value(), lock_mask_in_place | self_fwd_mask_in_place) >= static_cast(marked_value); } bool is_neutral() const { // Not locked, or marked - a "clean" neutral state return (mask_bits(value(), lock_mask_in_place) == unlocked_value); @@ -260,6 +281,12 @@ class markWord { return hash() == no_hash; } + inline Klass* klass() const; + inline Klass* klass_or_null() const; + inline Klass* klass_without_asserts() const; + inline narrowKlass narrow_klass() const; + inline markWord set_narrow_klass(narrowKlass narrow_klass) const; + // Prototype mark for initialization static markWord prototype() { return markWord( no_hash_in_place | no_lock_in_place ); @@ -274,6 +301,21 @@ class markWord { // Recover address of oop from encoded form used in mark inline void* decode_pointer() const { return (void*)clear_lock_bits().value(); } + inline bool is_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");) + return mask_bits(value(), self_fwd_mask_in_place) != 0; + } + + inline markWord set_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");) + return markWord(value() | self_fwd_mask_in_place); + } + + inline markWord unset_self_forwarded() const { + NOT_LP64(assert(LockingMode != LM_LEGACY, "incorrect with LM_LEGACY on 32 bit");) + return markWord(value() & ~self_fwd_mask_in_place); + } + inline oop forwardee() const { return cast_to_oop(decode_pointer()); } diff --git a/src/hotspot/share/oops/markWord.inline.hpp b/src/hotspot/share/oops/markWord.inline.hpp new file mode 100644 index 00000000000..27c8cfdeaef --- /dev/null +++ b/src/hotspot/share/oops/markWord.inline.hpp @@ -0,0 +1,81 @@ +/* + * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_MARKWORD_INLINE_HPP +#define SHARE_OOPS_MARKWORD_INLINE_HPP + +#include "oops/compressedKlass.inline.hpp" +#include "oops/compressedOops.inline.hpp" +#include "oops/markWord.hpp" + +narrowKlass markWord::narrow_klass() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return narrowKlass(value() >> klass_shift); +#else + ShouldNotReachHere(); + return 0; +#endif +} + +markWord markWord::set_narrow_klass(narrowKlass narrow_klass) const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return markWord((value() & ~klass_mask_in_place) | ((uintptr_t) narrow_klass << klass_shift)); +#else + ShouldNotReachHere(); + return markWord(0); +#endif +} + +Klass* markWord::klass() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode_not_null(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +Klass* markWord::klass_or_null() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +Klass* markWord::klass_without_asserts() const { +#ifdef _LP64 + assert(UseCompactObjectHeaders, "only used with compact object headers"); + return CompressedKlassPointers::decode_without_asserts(narrow_klass()); +#else + ShouldNotReachHere(); + return nullptr; +#endif +} + +#endif // SHARE_OOPS_MARKWORD_INLINE_HPP diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp index bee010b6d72..0697901d174 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -143,7 +143,10 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK } size_t ObjArrayKlass::oop_size(oop obj) const { - assert(obj->is_objArray(), "must be object array"); + // In this assert, we cannot safely access the Klass* with compact headers, + // because size_given_klass() calls oop_size() on objects that might be + // concurrently forwarded, which would overwrite the Klass*. + assert(UseCompactObjectHeaders || obj->is_objArray(), "must be object array"); return objArrayOop(obj)->object_size(); } diff --git a/src/hotspot/share/oops/objArrayKlass.inline.hpp b/src/hotspot/share/oops/objArrayKlass.inline.hpp index 6c9165509c7..a92c42d21a8 100644 --- a/src/hotspot/share/oops/objArrayKlass.inline.hpp +++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ void ObjArrayKlass::oop_oop_iterate_elements_bounded( template void ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { - assert (obj->is_array(), "obj must be array"); + assert(obj->is_array(), "obj must be array"); objArrayOop a = objArrayOop(obj); if (Devirtualizer::do_metadata(closure)) { diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index acb47d4c7cf..9385379a617 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -153,8 +153,7 @@ bool oopDesc::is_objArray_noinline() const { return is_objArray(); } bool oopDesc::is_typeArray_noinline() const { return is_typeArray(); } bool oopDesc::has_klass_gap() { - // Only has a klass gap when compressed class pointers are used. - return UseCompressedClassPointers; + return UseCompressedClassPointers && !UseCompactObjectHeaders; } #if INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index a330b3df529..dcf42c7343b 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,8 @@ class oopDesc { // make use of the C++ copy/assign incorrect. NONCOPYABLE(oopDesc); + inline oop cas_set_forwardee(markWord new_mark, markWord old_mark, atomic_memory_order order); + public: // Must be trivial; see verifying static assert after the class. oopDesc() = default; @@ -78,6 +80,9 @@ class oopDesc { inline markWord cas_set_mark(markWord new_mark, markWord old_mark); inline markWord cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order); + // Returns the prototype mark that should be used for this object. + inline markWord prototype_mark() const; + // Used only to re-initialize the mark word (e.g., of promoted // objects during a GC) -- requires a valid klass pointer inline void init_mark(); @@ -95,8 +100,14 @@ class oopDesc { // For klass field compression static inline void set_klass_gap(HeapWord* mem, int z); - // size of object header, aligned to platform wordSize - static constexpr int header_size() { return sizeof(oopDesc)/HeapWordSize; } + // Size of object header, aligned to platform wordSize + static int header_size() { + if (UseCompactObjectHeaders) { + return sizeof(markWord) / HeapWordSize; + } else { + return sizeof(oopDesc) / HeapWordSize; + } + } // Returns whether this is an instance of k or an instance of a subclass of k inline bool is_a(Klass* k) const; @@ -258,16 +269,22 @@ class oopDesc { // Forward pointer operations for scavenge inline bool is_forwarded() const; + inline bool is_self_forwarded() const; inline void forward_to(oop p); + inline void forward_to_self(); // Like "forward_to", but inserts the forwarding pointer atomically. 
// Exactly one thread succeeds in inserting the forwarding pointer, and // this call returns null for that thread; any other thread has the // value of the forwarding pointer returned and does not modify "this". inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative); + inline oop forward_to_self_atomic(markWord compare, atomic_memory_order order = memory_order_conservative); inline oop forwardee() const; + inline oop forwardee(markWord header) const; + + inline void unset_self_forwarded(); // Age of object during scavenge inline uint age() const; @@ -311,12 +328,39 @@ class oopDesc { // for code generation static int mark_offset_in_bytes() { return (int)offset_of(oopDesc, _mark); } - static int klass_offset_in_bytes() { return (int)offset_of(oopDesc, _metadata._klass); } + static int klass_offset_in_bytes() { +#ifdef _LP64 + if (UseCompactObjectHeaders) { + // NOTE: The only places where this is used with compact headers are the C2 + // compiler and JVMCI, and even there we don't use it to access the (narrow)Klass* + // directly. It is used only as a placeholder to identify the special memory slice + // containing Klass* info. This value could be any value that is not a valid + // field offset. Use an offset halfway into the markWord, as the markWord is never + // partially loaded from C2 and JVMCI. + return mark_offset_in_bytes() + 4; + } else +#endif + { + return (int)offset_of(oopDesc, _metadata._klass); + } + } static int klass_gap_offset_in_bytes() { assert(has_klass_gap(), "only applicable to compressed klass pointers"); return klass_offset_in_bytes() + sizeof(narrowKlass); } + static int base_offset_in_bytes() { + if (UseCompactObjectHeaders) { + // With compact headers, the Klass* field is not used for the Klass* + // and is used for the object fields instead. 
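For reference, the three branches of base_offset_in_bytes() below work out to the following field offsets on a 64-bit VM (markWord = 8 bytes, narrowKlass = 4 bytes, Klass* = 8 bytes); this is a standalone illustration, not HotSpot code:

#include <cstdio>

int main() {
  const int mark = 8, narrow_klass = 4, full_klass_ptr = 8;
  printf("compact headers          : fields start at offset %d\n", mark);                  // 8
  printf("compressed class pointers: fields start at offset %d\n", mark + narrow_klass);   // 12
  printf("uncompressed Klass*      : fields start at offset %d\n", mark + full_klass_ptr); // 16
  return 0;
}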
+ return sizeof(markWord); + } else if (UseCompressedClassPointers) { + return sizeof(markWord) + sizeof(narrowKlass); + } else { + return sizeof(markWord) + sizeof(Klass*); + } + } + // for error reporting static void* load_oop_raw(oop obj, int offset); }; diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index 95b99b215fa..098e6bfa90f 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -34,7 +34,7 @@ #include "oops/arrayOop.hpp" #include "oops/compressedKlass.inline.hpp" #include "oops/instanceKlass.hpp" -#include "oops/markWord.hpp" +#include "oops/markWord.inline.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/atomic.hpp" #include "runtime/globals.hpp" @@ -82,20 +82,32 @@ markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memo return Atomic::cmpxchg(&_mark, old_mark, new_mark, order); } +markWord oopDesc::prototype_mark() const { + if (UseCompactObjectHeaders) { + return klass()->prototype_header(); + } else { + return markWord::prototype(); + } +} + void oopDesc::init_mark() { - set_mark(markWord::prototype()); + set_mark(prototype_mark()); } Klass* oopDesc::klass() const { - if (UseCompressedClassPointers) { - return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); + if (UseCompactObjectHeaders) { + return mark().klass(); + } else if (UseCompressedClassPointers) { + return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass); } else { return _metadata._klass; } } Klass* oopDesc::klass_or_null() const { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + return mark().klass_or_null(); + } else if (UseCompressedClassPointers) { return CompressedKlassPointers::decode(_metadata._compressed_klass); } else { return _metadata._klass; @@ -103,16 +115,20 @@ Klass* oopDesc::klass_or_null() const { } Klass* oopDesc::klass_or_null_acquire() const { - if (UseCompressedClassPointers) { - narrowKlass nklass = Atomic::load_acquire(&_metadata._compressed_klass); - return CompressedKlassPointers::decode(nklass); + if (UseCompactObjectHeaders) { + return mark_acquire().klass(); + } else if (UseCompressedClassPointers) { + narrowKlass narrow_klass = Atomic::load_acquire(&_metadata._compressed_klass); + return CompressedKlassPointers::decode(narrow_klass); } else { return Atomic::load_acquire(&_metadata._klass); } } Klass* oopDesc::klass_without_asserts() const { - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + return mark().klass_without_asserts(); + } else if (UseCompressedClassPointers) { return CompressedKlassPointers::decode_without_asserts(_metadata._compressed_klass); } else { return _metadata._klass; @@ -121,6 +137,7 @@ Klass* oopDesc::klass_without_asserts() const { void oopDesc::set_klass(Klass* k) { assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); + assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); if (UseCompressedClassPointers) { _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k); } else { @@ -130,6 +147,7 @@ void oopDesc::set_klass(Klass* k) { void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); + assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); char* raw_mem = ((char*)mem + klass_offset_in_bytes()); if (UseCompressedClassPointers) { Atomic::release_store((narrowKlass*)raw_mem, @@ 
-140,9 +158,8 @@ void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { } void oopDesc::set_klass_gap(HeapWord* mem, int v) { - if (UseCompressedClassPointers) { - *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v; - } + assert(has_klass_gap(), "precondition"); + *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v; } bool oopDesc::is_a(Klass* k) const { @@ -267,21 +284,53 @@ bool oopDesc::is_forwarded() const { return mark().is_forwarded(); } +bool oopDesc::is_self_forwarded() const { + return mark().is_self_forwarded(); +} + // Used by scavengers void oopDesc::forward_to(oop p) { + assert(cast_from_oop(p) != this, + "must not be used for self-forwarding, use forward_to_self() instead"); markWord m = markWord::encode_pointer_as_mark(p); assert(m.decode_pointer() == p, "encoding must be reversible"); set_mark(m); } -oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { - markWord m = markWord::encode_pointer_as_mark(p); - assert(m.decode_pointer() == p, "encoding must be reversible"); - markWord old_mark = cas_set_mark(m, compare, order); +void oopDesc::forward_to_self() { + set_mark(mark().set_self_forwarded()); +} + +oop oopDesc::cas_set_forwardee(markWord new_mark, markWord compare, atomic_memory_order order) { + markWord old_mark = cas_set_mark(new_mark, compare, order); if (old_mark == compare) { return nullptr; } else { - return cast_to_oop(old_mark.decode_pointer()); + assert(old_mark.is_forwarded(), "must be forwarded here"); + return forwardee(old_mark); + } +} + +oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) { + assert(cast_from_oop(p) != this, + "must not be used for self-forwarding, use forward_to_self_atomic() instead"); + markWord m = markWord::encode_pointer_as_mark(p); + assert(forwardee(m) == p, "encoding must be reversible"); + return cas_set_forwardee(m, compare, order); +} + +oop oopDesc::forward_to_self_atomic(markWord old_mark, atomic_memory_order order) { + markWord new_mark = old_mark.set_self_forwarded(); + assert(forwardee(new_mark) == cast_to_oop(this), "encoding must be reversible"); + return cas_set_forwardee(new_mark, old_mark, order); +} + +oop oopDesc::forwardee(markWord mark) const { + assert(mark.is_forwarded(), "only decode when actually forwarded"); + if (mark.is_self_forwarded()) { + return cast_to_oop(this); + } else { + return mark.forwardee(); } } @@ -289,7 +338,11 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde // The forwardee is used when copying during scavenge and mark-sweep. // It does need to clear the low two locking- and GC-related bits. oop oopDesc::forwardee() const { - return mark().forwardee(); + return forwardee(mark()); +} + +void oopDesc::unset_self_forwarded() { + set_mark(mark().unset_self_forwarded()); } // The following method needs to be MT safe. @@ -346,6 +399,7 @@ void oopDesc::oop_iterate_backwards(OopClosureType* cl) { template void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) { + // In this assert, we cannot safely access the Klass* with compact headers. assert(k == klass(), "wrong klass"); OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, k); } diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp index 38e28edd157..ddf60d4382e 100644 --- a/src/hotspot/share/oops/typeArrayKlass.cpp +++ b/src/hotspot/share/oops/typeArrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -171,7 +171,8 @@ void TypeArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos } size_t TypeArrayKlass::oop_size(oop obj) const { - assert(obj->is_typeArray(),"must be a type array"); + // In this assert, we cannot safely access the Klass* with compact headers. + assert(UseCompactObjectHeaders || obj->is_typeArray(),"must be a type array"); typeArrayOop t = typeArrayOop(obj); return t->object_size(this); } diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp index e800b3c736b..83ea2eea1a8 100644 --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -1615,8 +1615,14 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) } Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) { Node* mark_node = nullptr; - // For now only enable fast locking for non-array types - mark_node = phase->MakeConX(markWord::prototype().value()); + if (UseCompactObjectHeaders) { + Node* klass_node = in(AllocateNode::KlassNode); + Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset())))); + mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); + } else { + // For now only enable fast locking for non-array types + mark_node = phase->MakeConX(markWord::prototype().value()); + } return mark_node; } diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index a2428625d4d..9507475a30f 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -1701,6 +1701,10 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr } } if (flat->isa_klassptr()) { + if (UseCompactObjectHeaders) { + if (flat->offset() == in_bytes(Klass::prototype_header_offset())) + alias_type(idx)->set_rewritable(false); + } if (flat->offset() == in_bytes(Klass::super_check_offset_offset())) alias_type(idx)->set_rewritable(false); if (flat->offset() == in_bytes(Klass::modifier_flags_offset())) diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp index 2a40cf000d8..88dc00ab463 100644 --- a/src/hotspot/share/opto/lcm.cpp +++ b/src/hotspot/share/opto/lcm.cpp @@ -275,8 +275,8 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // cannot reason about it; is probably not implicit null exception } else { const TypePtr* tptr; - if ((UseCompressedOops || UseCompressedClassPointers) && - (CompressedOops::shift() == 0 || CompressedKlassPointers::shift() == 0)) { + if ((UseCompressedOops && CompressedOops::shift() == 0) || + (UseCompressedClassPointers && CompressedKlassPointers::shift() == 0)) { // 32-bits narrow oop can be the base of address expressions tptr = base->get_ptr_type(); } else { diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index e3cd60e6d91..babc487acfd 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -1288,7 +1288,6 @@ bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) { bool call_opt_stub = (StubRoutines::_string_indexof_array[ae] != nullptr); if (call_opt_stub) { - 
assert(arrayOopDesc::base_offset_in_bytes(T_BYTE) >= 16, "Needed for indexOf"); Node* call = make_runtime_call(RC_LEAF, OptoRuntime::string_IndexOf_Type(), StubRoutines::_string_indexof_array[ae], "stringIndexOf", TypePtr::BOTTOM, src_start, diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 31a453c9093..289ea30a633 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -1706,7 +1706,9 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, } rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type()); - rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + if (!UseCompactObjectHeaders) { + rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); + } int header_size = alloc->minimum_header_size(); // conservatively small // Array length diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 919d23fea8d..ad351fb81ad 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -1971,6 +1971,8 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { const Type* LoadNode::load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const { + assert(!UseCompactObjectHeaders || tkls->offset() != in_bytes(Klass::prototype_header_offset()), + "must not happen"); if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) { // The field is Klass::_modifier_flags. Return its (constant) value. // (Folds up the 2nd indirection in aClassConstant.getModifiers().) @@ -2149,6 +2151,13 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset"); return TypeInt::make(klass->super_check_offset()); } + if (UseCompactObjectHeaders) { + if (tkls->offset() == in_bytes(Klass::prototype_header_offset())) { + // The field is Klass::_prototype_header. Return its (constant) value. + assert(this->Opcode() == Op_LoadX, "must load a proper type from _prototype_header"); + return TypeX::make(klass->prototype_header()); + } + } // Compute index into primary_supers array juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); // Check for overflowing; use unsigned compare to handle the negative case. 
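The C2 changes above follow from the same layout: once the class id is part of the mark word, the initial mark of a new object is no longer one VM-wide constant but a per-class value kept in Klass::_prototype_header, so make_ideal_mark() loads it from the Klass node and LoadNode::Value() may fold that load to a constant (compile.cpp marks the slot non-rewritable because it never changes after class loading). A minimal sketch of the difference, using an assumed shift and bit pattern rather than the real markWord constants:

    // Sketch: why the initial mark word becomes per-class under compact headers.
    // klass_shift and the "unlocked" pattern are assumptions for illustration.
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t unlocked_pattern = 0x1;   // assumed neutral mark: no hash, age 0, unlocked
    constexpr int      klass_shift      = 42;

    // Without compact headers every new object starts with the same constant mark,
    // so the compiler can simply embed markWord::prototype().value().
    uint64_t prototype_mark_legacy() { return unlocked_pattern; }

    // With compact headers the prototype differs per class (it carries the class id),
    // so it is kept in the Klass and loaded from there at allocation time.
    uint64_t prototype_mark_compact(uint32_t narrow_klass_id) {
      return (uint64_t(narrow_klass_id) << klass_shift) | unlocked_pattern;
    }

    int main() {
      printf("legacy:  %#llx\n", (unsigned long long)prototype_mark_legacy());
      printf("compact: %#llx\n", (unsigned long long)prototype_mark_compact(0x1234));
      return 0;
    }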
@@ -2238,9 +2247,11 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { } } - Node* alloc = is_new_object_mark_load(); - if (alloc != nullptr) { - return TypeX::make(markWord::prototype().value()); + if (!UseCompactObjectHeaders) { + Node* alloc = is_new_object_mark_load(); + if (alloc != nullptr) { + return TypeX::make(markWord::prototype().value()); + } } return _type; diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index bcb6b919023..e9262a48086 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -1429,13 +1429,6 @@ void Arguments::set_use_compressed_oops() { #endif // _LP64 } -void Arguments::set_use_compressed_klass_ptrs() { -#ifdef _LP64 - assert(!UseCompressedClassPointers || CompressedClassSpaceSize <= KlassEncodingMetaspaceMax, - "CompressedClassSpaceSize is too large for UseCompressedClassPointers"); -#endif // _LP64 -} - void Arguments::set_conservative_max_heap_alignment() { // The conservative maximum required alignment for the heap is the maximum of // the alignments imposed by several sources: any requirements from the heap @@ -1454,7 +1447,6 @@ jint Arguments::set_ergonomics_flags() { #ifdef _LP64 set_use_compressed_oops(); - set_use_compressed_klass_ptrs(); // Also checks that certain machines are slower with compressed oops // in vm_version initialization code. @@ -1825,6 +1817,15 @@ bool Arguments::check_vm_args_consistency() { } #endif +#ifndef _LP64 + if (LockingMode == LM_LEGACY) { + FLAG_SET_CMDLINE(LockingMode, LM_LIGHTWEIGHT); + // Self-forwarding in bit 3 of the mark-word conflicts + // with 4-byte-aligned stack-locks. + warning("Legacy locking not supported on this platform"); + } +#endif + if (UseObjectMonitorTable && LockingMode != LM_LIGHTWEIGHT) { // ObjectMonitorTable requires lightweight locking. FLAG_SET_CMDLINE(UseObjectMonitorTable, false); @@ -3648,6 +3649,32 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { Arguments::print_on(&st); } +#ifdef _LP64 + if (UseCompactObjectHeaders && FLAG_IS_CMDLINE(UseCompressedClassPointers) && !UseCompressedClassPointers) { + warning("Compact object headers require compressed class pointers. Disabling compact object headers."); + FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); + } + if (UseCompactObjectHeaders && LockingMode != LM_LIGHTWEIGHT) { + FLAG_SET_DEFAULT(LockingMode, LM_LIGHTWEIGHT); + } + if (UseCompactObjectHeaders && !UseObjectMonitorTable) { + // If UseCompactObjectHeaders is on the command line, turn on UseObjectMonitorTable. + if (FLAG_IS_CMDLINE(UseCompactObjectHeaders)) { + FLAG_SET_DEFAULT(UseObjectMonitorTable, true); + + // If UseObjectMonitorTable is on the command line, turn off UseCompactObjectHeaders. + } else if (FLAG_IS_CMDLINE(UseObjectMonitorTable)) { + FLAG_SET_DEFAULT(UseCompactObjectHeaders, false); + // If neither on the command line, the defaults are incompatible, but turn on UseObjectMonitorTable. 
+ } else { + FLAG_SET_DEFAULT(UseObjectMonitorTable, true); + } + } + if (UseCompactObjectHeaders && !UseCompressedClassPointers) { + FLAG_SET_DEFAULT(UseCompressedClassPointers, true); + } +#endif + return JNI_OK; } @@ -3661,6 +3688,10 @@ jint Arguments::apply_ergo() { GCConfig::arguments()->initialize(); + if (UseCompressedClassPointers) { + CompressedKlassPointers::pre_initialize(); + } + CDSConfig::initialize(); // Initialize Metaspace flags and alignments diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp index ac842285fd8..2105d21430a 100644 --- a/src/hotspot/share/runtime/arguments.hpp +++ b/src/hotspot/share/runtime/arguments.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -263,7 +263,6 @@ class Arguments : AllStatic { // GC ergonomics static void set_conservative_max_heap_alignment(); static void set_use_compressed_oops(); - static void set_use_compressed_klass_ptrs(); static jint set_ergonomics_flags(); // Limits the given heap size by the maximum amount of virtual // memory this process is currently allowed to use. It also takes diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index b568e769304..cca58b0c22e 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -128,6 +128,9 @@ const size_t minimumSymbolTableSize = 1024; "Use 32-bit class pointers in 64-bit VM. " \ "lp64_product means flag is always constant in 32 bit VM") \ \ + product(bool, UseCompactObjectHeaders, false, EXPERIMENTAL, \ + "Use compact 64-bit object headers in 64-bit VM") \ + \ product(int, ObjectAlignmentInBytes, 8, \ "Default object alignment in bytes, 8 is minimum") \ range(8, 256) \ @@ -144,6 +147,7 @@ const size_t minimumSymbolTableSize = 1024; constraint) const bool UseCompressedOops = false; const bool UseCompressedClassPointers = false; +const bool UseCompactObjectHeaders = false; const int ObjectAlignmentInBytes = 8; #endif // _LP64 diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 3060e225427..800102f238c 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -2004,8 +2004,6 @@ declare_constant(BytesPerWord) \ declare_constant(BytesPerLong) \ \ - declare_constant(LogKlassAlignmentInBytes) \ - \ declare_constant(HeapWordSize) \ declare_constant(LogHeapWordSize) \ \ @@ -2510,6 +2508,7 @@ declare_constant(markWord::lock_shift) \ declare_constant(markWord::age_shift) \ declare_constant(markWord::hash_shift) \ + LP64_ONLY(declare_constant(markWord::klass_shift)) \ \ declare_constant(markWord::lock_mask) \ declare_constant(markWord::lock_mask_in_place) \ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java index 67bba331149..7a7c761e89f 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/DebuggerBase.java @@ -24,6 +24,9 @@ package sun.jvm.hotspot.debugger; +import sun.jvm.hotspot.oops.Mark; +import sun.jvm.hotspot.runtime.VM; + /**
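The argument processing added to arguments.cpp above makes the experimental flag pull in everything it depends on: compressed class pointers, LM_LIGHTWEIGHT locking and, by default, the object monitor table. The condensed model below shows the intended outcome for the common case; it deliberately ignores the FLAG_IS_CMDLINE distinctions the real code uses to resolve conflicts, so read it as a sketch of the resulting configuration rather than of the exact code path.

    // Condensed model of the flag ergonomics; plain booleans stand in for the
    // real flag machinery, and LightweightLocking stands in for LockingMode.
    #include <cstdio>

    struct Flags {
      bool UseCompactObjectHeaders    = false;
      bool UseCompressedClassPointers = true;
      bool UseObjectMonitorTable      = false;
      bool LightweightLocking         = false;
    };

    void resolve(Flags& f, bool compressed_klass_set_on_cmdline) {
      // Compact headers cannot work without compressed class pointers; an explicit
      // -UseCompressedClassPointers wins and disables compact headers with a warning.
      if (f.UseCompactObjectHeaders && compressed_klass_set_on_cmdline && !f.UseCompressedClassPointers) {
        fprintf(stderr, "warning: compact object headers require compressed class pointers\n");
        f.UseCompactObjectHeaders = false;
      }
      if (f.UseCompactObjectHeaders) {
        f.LightweightLocking         = true;   // compact headers imply LM_LIGHTWEIGHT
        f.UseObjectMonitorTable      = true;   // and, by default, the monitor table
        f.UseCompressedClassPointers = true;   // and compressed class pointers
      }
    }

    int main() {
      Flags f;
      f.UseCompactObjectHeaders = true;        // e.g. -XX:+UseCompactObjectHeaders
      resolve(f, /*compressed_klass_set_on_cmdline=*/false);
      printf("ccp=%d omt=%d lw=%d\n", f.UseCompressedClassPointers,
             f.UseObjectMonitorTable, f.LightweightLocking);
      return 0;
    }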

DebuggerBase is a recommended base class for debugger implementations. It can use a PageCache to cache data from the target process. Note that this class would not be suitable if the @@ -394,7 +397,15 @@ public abstract class DebuggerBase implements Debugger { protected long readCompKlassAddressValue(long address) throws UnmappedAddressException, UnalignedAddressException { - long value = readCInteger(address, getKlassPtrSize(), true); + long value; + if (VM.getVM().isCompactObjectHeadersEnabled()) { + // With compact headers, the compressed Klass* is currently read from the mark + // word. We need to load the whole mark, and shift the upper parts. + value = readCInteger(address, machDesc.getAddressSize(), true); + value = value >>> Mark.getKlassShift(); + } else { + value = readCInteger(address, getKlassPtrSize(), true); + } if (value != 0) { value = (long)(narrowKlassBase + (long)(value << narrowKlassShift)); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java index 3f96f72cf04..c4eeaf4a367 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Array.java @@ -81,7 +81,9 @@ public class Array extends Oop { if (lengthOffsetInBytes != 0) { return lengthOffsetInBytes; } - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + lengthOffsetInBytes = Oop.getHeaderSize(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { lengthOffsetInBytes = typeSize - VM.getVM().getIntSize(); } else { lengthOffsetInBytes = typeSize; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java index b837d869ea0..fd364d6a174 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Instance.java @@ -55,7 +55,9 @@ public class Instance extends Oop { // Returns header size in bytes. 
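The serviceability-agent change above (readCompKlassAddressValue) mirrors outside the VM what oopDesc::klass() now does inside it: read the whole 64-bit mark, shift the class id down by markWord::klass_shift, then rebuild the Klass address from the narrow-Klass encoding base and shift. A sketch of that arithmetic; the 42-bit shift and the encoding values are stand-ins for what the agent actually reads from the target VM:

    // Sketch of decoding a Klass address from a raw 64-bit mark word, as an
    // out-of-process debugger would. Constants are illustrative assumptions.
    #include <cstdint>
    #include <cstdio>

    struct NarrowKlassEncoding {
      uint64_t base;    // narrow-Klass encoding base, read from the target VM
      int      shift;   // narrow-Klass encoding shift, read from the target VM
    };

    constexpr int assumed_klass_shift = 42;     // markWord::klass_shift in the target VM

    uint64_t klass_address_from_mark(uint64_t raw_mark, const NarrowKlassEncoding& enc) {
      uint64_t narrow_klass = raw_mark >> assumed_klass_shift;   // strip lock/age/hash bits
      return enc.base + (narrow_klass << enc.shift);             // same math as decode()
    }

    int main() {
      NarrowKlassEncoding enc{0x800000000ULL, 0};                // hypothetical encoding
      uint64_t mark = (uint64_t(0x1234) << assumed_klass_shift) | 0x1;
      printf("klass at %#llx\n", (unsigned long long)klass_address_from_mark(mark, enc));
      return 0;
    }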
public static long getHeaderSize() { - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + return Oop.getHeaderSize(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { return typeSize - VM.getVM().getIntSize(); } else { return typeSize; diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java index a3a06ec73f5..edffd56d2cd 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java @@ -54,6 +54,9 @@ public class Mark extends VMObject { lockShift = db.lookupLongConstant("markWord::lock_shift").longValue(); ageShift = db.lookupLongConstant("markWord::age_shift").longValue(); hashShift = db.lookupLongConstant("markWord::hash_shift").longValue(); + if (VM.getVM().isLP64()) { + klassShift = db.lookupLongConstant("markWord::klass_shift").longValue(); + } lockMask = db.lookupLongConstant("markWord::lock_mask").longValue(); lockMaskInPlace = db.lookupLongConstant("markWord::lock_mask_in_place").longValue(); ageMask = db.lookupLongConstant("markWord::age_mask").longValue(); @@ -82,6 +85,7 @@ public class Mark extends VMObject { private static long lockShift; private static long ageShift; private static long hashShift; + private static long klassShift; private static long lockMask; private static long lockMaskInPlace; @@ -102,6 +106,10 @@ public class Mark extends VMObject { private static long maxAge; + public static long getKlassShift() { + return klassShift; + } + public Mark(Address addr) { super(addr); } @@ -191,6 +199,11 @@ public class Mark extends VMObject { return hash() == noHash; } + public Klass getKlass() { + assert(VM.getVM().isCompactObjectHeadersEnabled()); + return (Klass)Metadata.instantiateWrapperFor(addr.getCompKlassAddressAt(0)); + } + // Debugging public void printOn(PrintStream tty) { if (isLocked()) { diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java index bf957941a56..825b1bf1437 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Oop.java @@ -46,9 +46,14 @@ public class Oop { private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { Type type = db.lookupType("oopDesc"); mark = new CIntField(type.getCIntegerField("_mark"), 0); - klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); - compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); - headerSize = type.getSize(); + if (VM.getVM().isCompactObjectHeadersEnabled()) { + Type markType = db.lookupType("markWord"); + headerSize = markType.getSize(); + } else { + headerSize = type.getSize(); + klass = new MetadataField(type.getAddressField("_metadata._klass"), 0); + compressedKlass = new NarrowKlassField(type.getAddressField("_metadata._compressed_klass"), 0); + } } private OopHandle handle; @@ -75,8 +80,12 @@ public class Oop { // Accessors for declared fields public Mark getMark() { return new Mark(getHandle()); } + public Klass getKlass() { - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + assert(VM.getVM().isCompressedKlassPointersEnabled()); + return getMark().getKlass(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { return 
(Klass)compressedKlass.getValue(getHandle()); } else { return (Klass)klass.getValue(getHandle()); @@ -147,10 +156,12 @@ public class Oop { void iterateFields(OopVisitor visitor, boolean doVMFields) { if (doVMFields) { visitor.doCInt(mark, true); - if (VM.getVM().isCompressedKlassPointersEnabled()) { - visitor.doMetadata(compressedKlass, true); - } else { - visitor.doMetadata(klass, true); + if (!VM.getVM().isCompactObjectHeadersEnabled()) { + if (VM.getVM().isCompressedKlassPointersEnabled()) { + visitor.doMetadata(compressedKlass, true); + } else { + visitor.doMetadata(klass, true); + } } } } @@ -206,7 +217,10 @@ public class Oop { if (handle == null) { return null; } - if (VM.getVM().isCompressedKlassPointersEnabled()) { + if (VM.getVM().isCompactObjectHeadersEnabled()) { + Mark mark = new Mark(handle); + return mark.getKlass(); + } else if (VM.getVM().isCompressedKlassPointersEnabled()) { return (Klass)Metadata.instantiateWrapperFor(handle.getCompKlassAddressAt(compressedKlass.getOffset())); } else { return (Klass)Metadata.instantiateWrapperFor(handle.getAddressAt(klass.getOffset())); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java index da900884045..202217b6071 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java @@ -149,6 +149,7 @@ public class VM { private Boolean sharingEnabled; private Boolean compressedOopsEnabled; private Boolean compressedKlassPointersEnabled; + private Boolean compactObjectHeadersEnabled; // command line flags supplied to VM - see struct JVMFlag in jvmFlag.hpp public static final class Flag { @@ -969,6 +970,15 @@ public class VM { return compressedKlassPointersEnabled.booleanValue(); } + public boolean isCompactObjectHeadersEnabled() { + if (compactObjectHeadersEnabled == null) { + Flag flag = getCommandLineFlag("UseCompactObjectHeaders"); + compactObjectHeadersEnabled = (flag == null) ? Boolean.FALSE: + (flag.getBool()? Boolean.TRUE: Boolean.FALSE); + } + return compactObjectHeadersEnabled.booleanValue(); + } + public int getObjectAlignmentInBytes() { if (objectAlignmentInBytes == 0) { Flag flag = getCommandLineFlag("ObjectAlignmentInBytes"); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java index 3f701b8d24e..6a8b794a935 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/RobustOopDeterminator.java @@ -26,6 +26,7 @@ package sun.jvm.hotspot.utilities; import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.oops.Metadata; +import sun.jvm.hotspot.oops.Oop; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.types.*; @@ -37,26 +38,6 @@ import sun.jvm.hotspot.types.*; states than the ObjectHeap code. 
*/ public class RobustOopDeterminator { - private static AddressField klassField; - - static { - VM.registerVMInitializedObserver(new Observer() { - public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); - } - }); - } - - private static void initialize(TypeDataBase db) { - Type type = db.lookupType("oopDesc"); - - if (VM.getVM().isCompressedKlassPointersEnabled()) { - klassField = type.getAddressField("_metadata._compressed_klass"); - } else { - klassField = type.getAddressField("_metadata._klass"); - } - } - public static boolean oopLooksValid(OopHandle oop) { if (oop == null) { return false; @@ -66,11 +47,7 @@ public class RobustOopDeterminator { } try { // Try to instantiate the Klass - if (VM.getVM().isCompressedKlassPointersEnabled()) { - Metadata.instantiateWrapperFor(oop.getCompKlassAddressAt(klassField.getOffset())); - } else { - Metadata.instantiateWrapperFor(klassField.getValue(oop)); - } + Oop.getKlassForOopHandle(oop); return true; } catch (AddressException | WrongTypeException e) { return false; diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java index 16d9cf3625e..5d5956a667c 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotVMConfig.java @@ -69,7 +69,7 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess { final int objectAlignment = getFlag("ObjectAlignmentInBytes", Integer.class); - final int hubOffset = getFieldOffset("oopDesc::_metadata._klass", Integer.class, "Klass*"); + final int klassOffsetInBytes = getFieldValue("CompilerToVM::Data::oopDesc_klass_offset_in_bytes", Integer.class, "int"); final int subklassOffset = getFieldOffset("Klass::_subklass", Integer.class, "Klass*"); final int superOffset = getFieldOffset("Klass::_super", Integer.class, "Klass*"); diff --git a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/SharedLibraryJVMCIReflection.java b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/SharedLibraryJVMCIReflection.java index 2877a09968f..ab8b3d335cb 100644 --- a/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/SharedLibraryJVMCIReflection.java +++ b/src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/SharedLibraryJVMCIReflection.java @@ -191,7 +191,11 @@ class SharedLibraryJVMCIReflection extends HotSpotJVMCIReflection { String name = theClass.getName().replace('.', '/'); return (HotSpotResolvedObjectTypeImpl) runtime().compilerToVm.lookupType(theClass.getClassLoader(), name); } - return runtime().compilerToVm.getResolvedJavaType(object, runtime().getConfig().hubOffset, false); + // HotSpot tests if the offset is oopDesc::klass_offset_in_bytes() and returns + // the object type accordingly. When UseCompactClassPointers is enabled, + // oopDesc::klass_offset_in_bytes() will return an offset halfway into the + // object's markWord as a placeholder referring to the klass pointer. + return runtime().compilerToVm.getResolvedJavaType(object, runtime().getConfig().klassOffsetInBytes, false); } @Override diff --git a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp index 5f9a361105e..0b438572b2d 100644 --- a/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp +++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,59 +23,48 @@ #include "precompiled.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/fullGCForwarding.inline.hpp" #include "oops/oop.inline.hpp" #include "unittest.hpp" -// Class to create a "fake" oop with a mark that will -// return true for calls to must_be_preserved(). -class FakeOop { - oopDesc _oop; - -public: - FakeOop() : _oop() { _oop.set_mark(originalMark()); } - - oop get_oop() { return &_oop; } - markWord mark() { return _oop.mark(); } - void set_mark(markWord m) { _oop.set_mark(m); } - void forward_to(oop obj) { - markWord m = markWord::encode_pointer_as_mark(obj); - _oop.set_mark(m); - } - - static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } - static markWord changedMark() { return markWord(0x4711); } -}; +static markWord originalMark() { return markWord(markWord::lock_mask_in_place); } +static markWord changedMark() { return markWord(0x4711); } #define ASSERT_MARK_WORD_EQ(a, b) ASSERT_EQ((a).value(), (b).value()) TEST_VM(PreservedMarks, iterate_and_restore) { PreservedMarks pm; - FakeOop o1; - FakeOop o2; - FakeOop o3; - FakeOop o4; + + HeapWord fakeheap[32] = { nullptr }; + HeapWord* heap = align_up(fakeheap, 8 * sizeof(HeapWord)); + FullGCForwarding::initialize(MemRegion(&heap[0], &heap[16])); + + oop o1 = cast_to_oop(&heap[0]); o1->set_mark(originalMark()); + oop o2 = cast_to_oop(&heap[2]); o2->set_mark(originalMark()); + oop o3 = cast_to_oop(&heap[4]); o3->set_mark(originalMark()); + oop o4 = cast_to_oop(&heap[6]); o4->set_mark(originalMark()); // Make sure initial marks are correct. - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::originalMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::originalMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), originalMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), originalMark()); // Change the marks and verify change. - o1.set_mark(FakeOop::changedMark()); - o2.set_mark(FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o1.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o2.mark(), FakeOop::changedMark()); + o1->set_mark(changedMark()); + o2->set_mark(changedMark()); + ASSERT_MARK_WORD_EQ(o1->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o2->mark(), changedMark()); // Push o1 and o2 to have their marks preserved. - pm.push_if_necessary(o1.get_oop(), o1.mark()); - pm.push_if_necessary(o2.get_oop(), o2.mark()); + pm.push_if_necessary(o1, o1->mark()); + pm.push_if_necessary(o2, o2->mark()); // Fake a move from o1->o3 and o2->o4. - o1.forward_to(o3.get_oop()); - o2.forward_to(o4.get_oop()); - ASSERT_EQ(o1.get_oop()->forwardee(), o3.get_oop()); - ASSERT_EQ(o2.get_oop()->forwardee(), o4.get_oop()); + FullGCForwarding::forward_to(o1, o3); + FullGCForwarding::forward_to(o2, o4); + ASSERT_EQ(FullGCForwarding::forwardee(o1), o3); + ASSERT_EQ(FullGCForwarding::forwardee(o2), o4); // Adjust will update the PreservedMarks stack to // make sure the mark is updated at the new location. pm.adjust_during_full_gc(); @@ -83,6 +72,6 @@ TEST_VM(PreservedMarks, iterate_and_restore) { // Restore all preserved and verify that the changed // mark is now present at o3 and o4. 
pm.restore(); - ASSERT_MARK_WORD_EQ(o3.mark(), FakeOop::changedMark()); - ASSERT_MARK_WORD_EQ(o4.mark(), FakeOop::changedMark()); + ASSERT_MARK_WORD_EQ(o3->mark(), changedMark()); + ASSERT_MARK_WORD_EQ(o4->mark(), changedMark()); } diff --git a/test/hotspot/gtest/metaspace/metaspaceGtestContexts.hpp b/test/hotspot/gtest/metaspace/metaspaceGtestContexts.hpp index f1a3ac0141a..8a5aabf6cec 100644 --- a/test/hotspot/gtest/metaspace/metaspaceGtestContexts.hpp +++ b/test/hotspot/gtest/metaspace/metaspaceGtestContexts.hpp @@ -37,10 +37,14 @@ using metaspace::chunklevel_t; using namespace metaspace::chunklevel; class MetaspaceGtestContext : public metaspace::MetaspaceTestContext { + int _num_arenas_created; public: MetaspaceGtestContext(size_t commit_limit = 0, size_t reserve_limit = 0) : - metaspace::MetaspaceTestContext("gtest-metaspace-context", commit_limit, reserve_limit) - {} + metaspace::MetaspaceTestContext("gtest-metaspace-context", commit_limit, reserve_limit), + _num_arenas_created(0) {} + + int num_arenas_created() const { return _num_arenas_created; } + void inc_num_arenas_created() { _num_arenas_created ++; } }; class ChunkGtestContext : public MetaspaceGtestContext { diff --git a/test/hotspot/gtest/metaspace/test_binlist.cpp b/test/hotspot/gtest/metaspace/test_binlist.cpp index dd26d858e24..df12b40c30f 100644 --- a/test/hotspot/gtest/metaspace/test_binlist.cpp +++ b/test/hotspot/gtest/metaspace/test_binlist.cpp @@ -26,12 +26,14 @@ #include "precompiled.hpp" #include "memory/metaspace/binList.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" //#define LOG_PLEASE #include "metaspaceGtestCommon.hpp" using metaspace::BinList32; using metaspace::BinListImpl; using metaspace::MemRangeCounter; +using metaspace::MetaBlock; #define CHECK_BL_CONTENT(bl, expected_num, expected_size) { \ EXPECT_EQ(bl.count(), (unsigned)expected_num); \ @@ -43,6 +45,19 @@ using metaspace::MemRangeCounter; } \ } +template +struct TestedBinList : public BinListImpl { + typedef BinListImpl ListType; + void add_block(MetaWord* p, size_t word_size) { + ListType::add_block(MetaBlock(p, word_size)); + } + MetaWord* remove_block(size_t requested_size, size_t* real_size) { + MetaBlock result = ListType::remove_block(requested_size); + (*real_size) = result.word_size(); + return result.base(); + } +}; + template struct BinListBasicTest { @@ -206,14 +221,14 @@ struct BinListBasicTest { template const size_t BinListBasicTest::maxws = BINLISTTYPE::MaxWordSize; -TEST_VM(metaspace, BinList_basic_1) { BinListBasicTest< BinListImpl<1> >::basic_test(); } -TEST_VM(metaspace, BinList_basic_8) { BinListBasicTest< BinListImpl<8> >::basic_test(); } -TEST_VM(metaspace, BinList_basic_32) { BinListBasicTest::basic_test(); } +TEST_VM(metaspace, BinList_basic_1) { BinListBasicTest< TestedBinList<1> >::basic_test(); } +TEST_VM(metaspace, BinList_basic_8) { BinListBasicTest< TestedBinList<8> >::basic_test(); } +TEST_VM(metaspace, BinList_basic_32) { BinListBasicTest< TestedBinList<32> >::basic_test(); } -TEST_VM(metaspace, BinList_basic_2_1) { BinListBasicTest< BinListImpl<1> >::basic_test_2(); } -TEST_VM(metaspace, BinList_basic_2_8) { BinListBasicTest< BinListImpl<8> >::basic_test_2(); } -TEST_VM(metaspace, BinList_basic_2_32) { BinListBasicTest::basic_test_2(); } +TEST_VM(metaspace, BinList_basic_2_1) { BinListBasicTest< TestedBinList<1> >::basic_test_2(); } +TEST_VM(metaspace, BinList_basic_2_8) { BinListBasicTest< TestedBinList<8> >::basic_test_2(); } +TEST_VM(metaspace, BinList_basic_2_32) { 
BinListBasicTest< TestedBinList<32> >::basic_test_2(); } -TEST_VM(metaspace, BinList_basic_rand_1) { BinListBasicTest< BinListImpl<1> >::random_test(); } -TEST_VM(metaspace, BinList_basic_rand_8) { BinListBasicTest< BinListImpl<8> >::random_test(); } -TEST_VM(metaspace, BinList_basic_rand_32) { BinListBasicTest::random_test(); } +TEST_VM(metaspace, BinList_basic_rand_1) { BinListBasicTest< TestedBinList<1> >::random_test(); } +TEST_VM(metaspace, BinList_basic_rand_8) { BinListBasicTest< TestedBinList<8> >::random_test(); } +TEST_VM(metaspace, BinList_basic_rand_32) { BinListBasicTest< TestedBinList<32> >::random_test(); } diff --git a/test/hotspot/gtest/metaspace/test_blocktree.cpp b/test/hotspot/gtest/metaspace/test_blocktree.cpp index 50ec021ec11..05a3025f529 100644 --- a/test/hotspot/gtest/metaspace/test_blocktree.cpp +++ b/test/hotspot/gtest/metaspace/test_blocktree.cpp @@ -26,16 +26,29 @@ #include "precompiled.hpp" #include "memory/metaspace/blockTree.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" #include "memory/resourceArea.hpp" // #define LOG_PLEASE #include "metaspaceGtestCommon.hpp" using metaspace::BlockTree; using metaspace::MemRangeCounter; +using metaspace::MetaBlock; + +struct TestedBlockTree : public BlockTree { + void add_block(MetaWord* p, size_t word_size) { + BlockTree::add_block(MetaBlock(p, word_size)); + } + MetaWord* remove_block(size_t requested_size, size_t* real_size) { + MetaBlock result = BlockTree::remove_block(requested_size); + (*real_size) = result.word_size(); + return result.base(); + } +}; // Small helper. Given a 0-terminated array of sizes, a feeder buffer and a tree, // add blocks of these sizes to the tree in the order they appear in the array. -static void create_nodes(const size_t sizes[], FeederBuffer& fb, BlockTree& bt) { +static void create_nodes(const size_t sizes[], FeederBuffer& fb, TestedBlockTree& bt) { for (int i = 0; sizes[i] > 0; i ++) { size_t s = sizes[i]; MetaWord* p = fb.get(s); @@ -55,7 +68,7 @@ static void create_nodes(const size_t sizes[], FeederBuffer& fb, BlockTree& bt) TEST_VM(metaspace, BlockTree_basic) { - BlockTree bt; + TestedBlockTree bt; CHECK_BT_CONTENT(bt, 0, 0); size_t real_size = 0; @@ -112,7 +125,7 @@ static size_t helper_find_nearest_fit(const size_t sizes[], size_t request_size) // for a request size and check that it is the expected result. static void test_find_nearest_fit_with_tree(const size_t sizes[], size_t request_size) { - BlockTree bt; + TestedBlockTree bt; FeederBuffer fb(4 * K); create_nodes(sizes, fb, bt); @@ -155,7 +168,7 @@ TEST_VM(metaspace, BlockTree_find_nearest_fit) { 0 // stop }; - BlockTree bt; + TestedBlockTree bt; FeederBuffer fb(4 * K); create_nodes(sizes, fb, bt); @@ -170,7 +183,7 @@ TEST_VM(metaspace, BlockTree_find_nearest_fit) { // should exercise the list-part of the tree. TEST_VM(metaspace, BlockTree_basic_siblings) { - BlockTree bt; + TestedBlockTree bt; FeederBuffer fb(4 * K); CHECK_BT_CONTENT(bt, 0, 0); @@ -204,7 +217,7 @@ TEST_VM(metaspace, BlockTree_print_test) { 0 // stop }; - BlockTree bt; + TestedBlockTree bt; FeederBuffer fb(4 * K); create_nodes(sizes, fb, bt); @@ -222,7 +235,7 @@ TEST_VM_ASSERT_MSG(metaspace, BlockTree_overwriter_test, ".*failed: Invalid node static const size_t sizes1[] = { 30, 17, 0 }; static const size_t sizes2[] = { 12, 12, 0 }; - BlockTree bt; + TestedBlockTree bt; FeederBuffer fb(4 * K); // some nodes... 
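The TestedBinList and TestedBlockTree shims above adapt the old pointer-plus-size test interface to the new MetaBlock-based one used by the metaspace free-block structures. The toy value type below models the semantics those shims (and the MetaBlock gtests later in this patch) rely on, in particular split_off_tail(); it is a model only, not the real metaspace::MetaBlock:

    // Toy model of a MetaBlock-style value type: an address range measured in words.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using MetaWord = uintptr_t;   // stand-in: one "word" per pointer-sized slot

    class ToyMetaBlock {
      MetaWord* _base;
      size_t    _word_size;
    public:
      ToyMetaBlock() : _base(nullptr), _word_size(0) {}
      ToyMetaBlock(MetaWord* base, size_t word_size)
        : _base(word_size == 0 ? nullptr : base), _word_size(word_size) {}

      MetaWord* base() const   { return _base; }
      size_t word_size() const { return _word_size; }
      MetaWord* end() const    { return _base + _word_size; }
      bool is_empty() const    { return _word_size == 0; }

      // Carve the given number of words off the end: the receiver shrinks and the
      // returned block covers the tail, matching what the MetaBlock gtests check.
      ToyMetaBlock split_off_tail(size_t tail_words) {
        assert(tail_words <= _word_size);
        _word_size -= tail_words;
        ToyMetaBlock tail(_base + _word_size, tail_words);
        if (_word_size == 0) _base = nullptr;
        return tail;
      }
    };

    int main() {
      MetaWord buf[16];
      ToyMetaBlock bl(buf, 16);
      ToyMetaBlock tail = bl.split_off_tail(4);
      assert(bl.word_size() == 12 && tail.base() == buf + 12 && tail.word_size() == 4);
      return 0;
    }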
@@ -249,7 +262,7 @@ class BlockTreeTest { FeederBuffer _fb; - BlockTree _bt[2]; + TestedBlockTree _bt[2]; MemRangeCounter _cnt[2]; RandSizeGenerator _rgen; @@ -356,7 +369,7 @@ class BlockTreeTest { void drain_all() { for (int which = 0; which < 2; which++) { - BlockTree* bt = _bt + which; + TestedBlockTree* bt = _bt + which; size_t last_size = 0; while (!bt->is_empty()) { diff --git a/test/hotspot/gtest/metaspace/test_clms.cpp b/test/hotspot/gtest/metaspace/test_clms.cpp new file mode 100644 index 00000000000..efe83c415e0 --- /dev/null +++ b/test/hotspot/gtest/metaspace/test_clms.cpp @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2024 Red Hat, Inc. All rights reserved. + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/classLoaderMetaspace.hpp" +#include "memory/metaspace/freeBlocks.hpp" +#include "memory/metaspace/metablock.inline.hpp" +#include "memory/metaspace/metaspaceArena.hpp" +#include "memory/metaspace/metaspaceSettings.hpp" +#include "memory/metaspace/metaspaceStatistics.hpp" +#include "memory/metaspace.hpp" +#include "oops/klass.hpp" +#include "runtime/mutex.hpp" +#include "utilities/debug.hpp" +#include "utilities/align.hpp" +#include "utilities/globalDefinitions.hpp" + +#ifdef _LP64 + +#define LOG_PLEASE +#include "metaspaceGtestCommon.hpp" +#include "metaspaceGtestContexts.hpp" +#include "metaspaceGtestRangeHelpers.hpp" +#include "metaspaceGtestSparseArray.hpp" + +#define HANDLE_FAILURE \ + if (testing::Test::HasFailure()) { \ + return; \ + } + +namespace metaspace { + +class ClmsTester { + + Mutex _lock; + MetaspaceContext* _class_context; + MetaspaceContext* _nonclass_context; + ClassLoaderMetaspace* _clms; + const size_t _klass_arena_alignment_words; + unsigned _num_allocations; + + struct Deltas { + int num_chunks_delta; + ssize_t used_words_delta; + int num_freeblocks_delta; + ssize_t freeblocks_words_delta; + }; + + Deltas calc_deltas(const ArenaStats& before, const ArenaStats& after) { + Deltas d; + d.num_chunks_delta = after.totals()._num - before.totals()._num; + d.used_words_delta = after.totals()._used_words - before.totals()._used_words; + d.num_freeblocks_delta = (int)after._free_blocks_num - (int)before._free_blocks_num; + d.freeblocks_words_delta = after._free_blocks_word_size - before._free_blocks_word_size; + return d; + } + +public: + + ClmsTester(size_t klass_alignment_words, Metaspace::MetaspaceType space_type, + MetaspaceContext* class_context, MetaspaceContext* nonclass_context) + : _lock(Monitor::nosafepoint, 
"CLMSTest_lock"), + _class_context(class_context), _nonclass_context(nonclass_context), + _clms(nullptr), _klass_arena_alignment_words(klass_alignment_words), _num_allocations(0) { + _clms = new ClassLoaderMetaspace(&_lock, space_type, nonclass_context, class_context, klass_alignment_words); + } + + ~ClmsTester() { + delete _clms; + EXPECT_EQ(_class_context->used_words(), (size_t)0); + EXPECT_EQ(_nonclass_context->used_words(), (size_t)0); + } + + MetaBlock allocate_and_check(size_t word_size, bool is_class) { + + // take stats before allocation + ClmsStats stats_before; + _clms->add_to_statistics(&stats_before); + + // allocate + MetaWord* p = _clms->allocate(word_size, is_class ? Metaspace::ClassType : Metaspace::NonClassType); + _num_allocations ++; + + // take stats after allocation + ClmsStats stats_after; + _clms->add_to_statistics(&stats_after); + + // for less verbose testing: + const ArenaStats& ca_before = stats_before._arena_stats_class; + const ArenaStats& ca_after = stats_after._arena_stats_class; + const ArenaStats& nca_before = stats_before._arena_stats_nonclass; + const ArenaStats& nca_after = stats_after._arena_stats_nonclass; + + // deltas + const Deltas d_ca = calc_deltas(ca_before, ca_after); + const Deltas d_nca = calc_deltas(nca_before, nca_after); + +#define EXPECT_FREEBLOCKS_UNCHANGED(arena_prefix) \ + EXPECT_EQ(d_##arena_prefix.num_freeblocks_delta, 0); \ + EXPECT_EQ(d_##arena_prefix.freeblocks_words_delta, (ssize_t)0); + +#define EXPECT_ARENA_UNCHANGED(arena_prefix) \ + EXPECT_EQ(d_##arena_prefix.num_chunks_delta, 0); \ + EXPECT_EQ(d_##arena_prefix.used_words_delta, (ssize_t)0); + + if (p != nullptr) { + + MetaBlock bl(p, word_size); + + if (is_class) { + + EXPECT_TRUE(bl.is_aligned_base(_klass_arena_alignment_words)); + + if (_num_allocations == 1) { + // first allocation: nonclass arena unchanged, class arena grows by 1 chunk and wordsize, + // class arena freeblocks unchanged + EXPECT_ARENA_UNCHANGED(nca); + EXPECT_FREEBLOCKS_UNCHANGED(nca); + EXPECT_EQ(d_ca.num_chunks_delta, 1); + EXPECT_EQ((size_t)d_ca.used_words_delta, word_size); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + return bl; + } + + // Had this been taken from class arena freeblocks? + if (d_ca.num_freeblocks_delta == -1) { + // the class arena freeblocks should have gone down, and the non-class arena freeblocks may have gone + // up in case the block was larger than required + const size_t wordsize_block_taken = (size_t)(-d_ca.freeblocks_words_delta); + EXPECT_GE(wordsize_block_taken, word_size); // the block we took must be at least allocation size + const size_t expected_freeblock_remainder = wordsize_block_taken - word_size; + if (expected_freeblock_remainder > 0) { + // the remainder, if it existed, should have been added to nonclass freeblocks + EXPECT_EQ(d_nca.num_freeblocks_delta, 1); + EXPECT_EQ((size_t)d_nca.freeblocks_words_delta, expected_freeblock_remainder); + } + // finally, nothing should have happened in the arenas proper. + EXPECT_ARENA_UNCHANGED(ca); + EXPECT_ARENA_UNCHANGED(nca); + return bl; + } + + // block was taken from class arena proper + + // We expect allocation waste due to alignment, should have been added to the freeblocks + // of nonclass arena. Allocation waste can be 0. If no chunk turnover happened, it must be + // smaller than klass alignment, otherwise it can get as large as a commit granule. + const size_t max_expected_allocation_waste = + d_ca.num_chunks_delta == 0 ? 
(_klass_arena_alignment_words - 1) : Settings::commit_granule_words(); + EXPECT_GE(d_ca.num_chunks_delta, 0); + EXPECT_LE(d_ca.num_chunks_delta, 1); + EXPECT_GE((size_t)d_ca.used_words_delta, word_size); + EXPECT_LE((size_t)d_ca.used_words_delta, word_size + max_expected_allocation_waste); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + EXPECT_ARENA_UNCHANGED(nca); + if (max_expected_allocation_waste > 0) { + EXPECT_GE(d_nca.num_freeblocks_delta, 0); + EXPECT_LE(d_nca.num_freeblocks_delta, 1); + EXPECT_GE(d_nca.freeblocks_words_delta, 0); + EXPECT_LE((size_t)d_nca.freeblocks_words_delta, max_expected_allocation_waste); + } else { + EXPECT_FREEBLOCKS_UNCHANGED(nca); + } + return bl; + // end: is_class + } else { + // Nonclass arena allocation. + // Allocation waste can happen: + // - if we allocate from nonclass freeblocks, the block remainder + // - if we allocate from arena proper, by chunk turnover + + if (d_nca.freeblocks_words_delta < 0) { + // We allocated a block from the nonclass arena freeblocks. + const size_t wordsize_block_taken = (size_t)(-d_nca.freeblocks_words_delta); + EXPECT_EQ(wordsize_block_taken, word_size); + // The number of blocks may or may not have decreased (depending on whether there + // was a wastage block) + EXPECT_GE(d_nca.num_chunks_delta, -1); + EXPECT_LE(d_nca.num_chunks_delta, 0); + EXPECT_ARENA_UNCHANGED(nca); + EXPECT_ARENA_UNCHANGED(ca); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + return bl; + } + + // We don't expect alignment waste. Only wastage happens at chunk turnover. + const size_t max_expected_allocation_waste = + d_nca.num_chunks_delta == 0 ? 0 : Settings::commit_granule_words(); + EXPECT_ARENA_UNCHANGED(ca); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + EXPECT_GE(d_nca.num_chunks_delta, 0); + EXPECT_LE(d_nca.num_chunks_delta, 1); + EXPECT_GE((size_t)d_nca.used_words_delta, word_size); + EXPECT_LE((size_t)d_nca.used_words_delta, word_size + max_expected_allocation_waste); + if (max_expected_allocation_waste == 0) { + EXPECT_FREEBLOCKS_UNCHANGED(nca); + } + } + return bl; + + } // end: allocation successful + + // allocation failed. 
+ EXPECT_ARENA_UNCHANGED(ca); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + EXPECT_ARENA_UNCHANGED(nca); + EXPECT_FREEBLOCKS_UNCHANGED(nca); + + return MetaBlock(); + } + + MetaBlock allocate_expect_success(size_t word_size, bool is_class) { + MetaBlock bl = allocate_and_check(word_size, is_class); + EXPECT_TRUE(bl.is_nonempty()); + return bl; + } + + MetaBlock allocate_expect_failure(size_t word_size, bool is_class) { + MetaBlock bl = allocate_and_check(word_size, is_class); + EXPECT_TRUE(bl.is_empty()); + return bl; + } + + void deallocate_and_check(MetaBlock bl) { + + // take stats before deallocation + ClmsStats stats_before; + _clms->add_to_statistics(&stats_before); + + // allocate + _clms->deallocate(bl.base(), bl.word_size()); + + // take stats after deallocation + ClmsStats stats_after; + _clms->add_to_statistics(&stats_after); + + // for less verbose testing: + const ArenaStats& ca_before = stats_before._arena_stats_class; + const ArenaStats& ca_after = stats_after._arena_stats_class; + const ArenaStats& nca_before = stats_before._arena_stats_nonclass; + const ArenaStats& nca_after = stats_after._arena_stats_nonclass; + + // deltas + // deltas + const Deltas d_ca = calc_deltas(ca_before, ca_after); + const Deltas d_nca = calc_deltas(nca_before, nca_after); + + EXPECT_ARENA_UNCHANGED(ca); + EXPECT_ARENA_UNCHANGED(nca); + // Depending on whether the returned block was suitable for Klass, + // it may have gone to either the non-class freelist or the class freelist + if (d_ca.num_freeblocks_delta == 1) { + EXPECT_EQ(d_ca.num_freeblocks_delta, 1); + EXPECT_EQ((size_t)d_ca.freeblocks_words_delta, bl.word_size()); + EXPECT_FREEBLOCKS_UNCHANGED(nca); + } else { + EXPECT_EQ(d_nca.num_freeblocks_delta, 1); + EXPECT_EQ((size_t)d_nca.freeblocks_words_delta, bl.word_size()); + EXPECT_FREEBLOCKS_UNCHANGED(ca); + } + } +}; + +static constexpr size_t klass_size = sizeof(Klass) / BytesPerWord; + +static void basic_test(size_t klass_arena_alignment) { + MetaspaceGtestContext class_context, nonclass_context; + { + ClmsTester tester(klass_arena_alignment, Metaspace::StandardMetaspaceType, class_context.context(), nonclass_context.context()); + + MetaBlock bl1 = tester.allocate_expect_success(klass_size, true); + HANDLE_FAILURE; + + MetaBlock bl2 = tester.allocate_expect_success(klass_size, true); + HANDLE_FAILURE; + + tester.deallocate_and_check(bl1); + HANDLE_FAILURE; + + MetaBlock bl3 = tester.allocate_expect_success(klass_size, true); + HANDLE_FAILURE; + + MetaBlock bl4 = tester.allocate_expect_success(Metaspace::min_allocation_word_size, false); + HANDLE_FAILURE; + + MetaBlock bl5 = tester.allocate_expect_success(K, false); + HANDLE_FAILURE; + + tester.deallocate_and_check(bl5); + HANDLE_FAILURE; + + MetaBlock bl6 = tester.allocate_expect_success(K, false); + HANDLE_FAILURE; + + EXPECT_EQ(bl5, bl6); // should have gotten the same block back from freelist + } + EXPECT_EQ(class_context.used_words(), (size_t)0); + EXPECT_EQ(nonclass_context.used_words(), (size_t)0); + // we should have used exactly one commit granule (64K), not more, for each context + EXPECT_EQ(class_context.committed_words(), Settings::commit_granule_words()); + EXPECT_EQ(nonclass_context.committed_words(), Settings::commit_granule_words()); +} + +#define TEST_BASIC_N(n) \ +TEST_VM(metaspace, CLMS_basics_##n) { \ + basic_test(n); \ +} + +TEST_BASIC_N(1) +TEST_BASIC_N(4) +TEST_BASIC_N(16) +TEST_BASIC_N(32) +TEST_BASIC_N(128) + +static void test_random(size_t klass_arena_alignment) { + MetaspaceGtestContext class_context, nonclass_context; 
+ constexpr int max_allocations = 1024; + const SizeRange nonclass_alloc_range(Metaspace::min_allocation_alignment_words, 1024); + const SizeRange class_alloc_range(klass_size, 1024); + const IntRange one_out_of_ten(0, 10); + for (int runs = 9; runs >= 0; runs--) { + { + ClmsTester tester(64, Metaspace::StandardMetaspaceType, class_context.context(), nonclass_context.context()); + struct LifeBlock { + MetaBlock bl; + bool is_class; + }; + LifeBlock life_allocations[max_allocations]; + for (int i = 0; i < max_allocations; i++) { + life_allocations[i].bl.reset(); + } + + unsigned num_class_allocs = 0, num_nonclass_allocs = 0, num_class_deallocs = 0, num_nonclass_deallocs = 0; + for (int i = 0; i < 5000; i ++) { + const int slot = IntRange(0, max_allocations).random_value(); + if (life_allocations[slot].bl.is_empty()) { + const bool is_class = one_out_of_ten.random_value() == 0; + const size_t word_size = + is_class ? class_alloc_range.random_value() : nonclass_alloc_range.random_value(); + MetaBlock bl = tester.allocate_expect_success(word_size, is_class); + HANDLE_FAILURE; + life_allocations[slot].bl = bl; + life_allocations[slot].is_class = is_class; + if (is_class) { + num_class_allocs ++; + } else { + num_nonclass_allocs ++; + } + } else { + tester.deallocate_and_check(life_allocations[slot].bl); + HANDLE_FAILURE; + life_allocations[slot].bl.reset(); + if (life_allocations[slot].is_class) { + num_class_deallocs ++; + } else { + num_nonclass_deallocs ++; + } + } + } + LOG("num class allocs: %u, num nonclass allocs: %u, num class deallocs: %u, num nonclass deallocs: %u", + num_class_allocs, num_nonclass_allocs, num_class_deallocs, num_nonclass_deallocs); + } + EXPECT_EQ(class_context.used_words(), (size_t)0); + EXPECT_EQ(nonclass_context.used_words(), (size_t)0); + constexpr float fragmentation_factor = 3.0f; + const size_t max_expected_nonclass_committed = max_allocations * nonclass_alloc_range.highest() * fragmentation_factor; + const size_t max_expected_class_committed = max_allocations * class_alloc_range.highest() * fragmentation_factor; + // we should have used exactly one commit granule (64K), not more, for each context + EXPECT_LT(class_context.committed_words(), max_expected_class_committed); + EXPECT_LT(nonclass_context.committed_words(), max_expected_nonclass_committed); + } +} + +#define TEST_RANDOM_N(n) \ +TEST_VM(metaspace, CLMS_random_##n) { \ + test_random(n); \ +} + +TEST_RANDOM_N(1) +TEST_RANDOM_N(4) +TEST_RANDOM_N(16) +TEST_RANDOM_N(32) +TEST_RANDOM_N(128) + +} // namespace metaspace + +#endif // _LP64 diff --git a/test/hotspot/gtest/metaspace/test_freeblocks.cpp b/test/hotspot/gtest/metaspace/test_freeblocks.cpp index fbf2e822c11..239729bc696 100644 --- a/test/hotspot/gtest/metaspace/test_freeblocks.cpp +++ b/test/hotspot/gtest/metaspace/test_freeblocks.cpp @@ -26,10 +26,13 @@ #include "precompiled.hpp" #include "memory/metaspace/counters.hpp" #include "memory/metaspace/freeBlocks.hpp" +#include "memory/metaspace/metablock.hpp" + //#define LOG_PLEASE #include "metaspaceGtestCommon.hpp" using metaspace::FreeBlocks; +using metaspace::MetaBlock; using metaspace::SizeCounter; #define CHECK_CONTENT(fb, num_blocks_expected, word_size_expected) \ @@ -43,199 +46,21 @@ using metaspace::SizeCounter; EXPECT_EQ(fb.count(), (int)num_blocks_expected); \ } -class FreeBlocksTest { - - FeederBuffer _fb; - FreeBlocks _freeblocks; - - // random generator for block feeding - RandSizeGenerator _rgen_feeding; - - // random generator for allocations (and, hence, deallocations) - 
RandSizeGenerator _rgen_allocations; - - SizeCounter _allocated_words; - - struct allocation_t { - allocation_t* next; - size_t word_size; - MetaWord* p; - }; - - // Array of the same size as the pool max capacity; holds the allocated elements. - allocation_t* _allocations; - - int _num_allocs; - int _num_deallocs; - int _num_feeds; - - bool feed_some() { - size_t word_size = _rgen_feeding.get(); - MetaWord* p = _fb.get(word_size); - if (p != nullptr) { - _freeblocks.add_block(p, word_size); - return true; - } - return false; - } - - bool deallocate_top() { - - allocation_t* a = _allocations; - if (a != nullptr) { - _allocations = a->next; - check_marked_range(a->p, a->word_size); - _freeblocks.add_block(a->p, a->word_size); - delete a; - DEBUG_ONLY(_freeblocks.verify();) - return true; - } - return false; - } - - void deallocate_all() { - while (deallocate_top()); - } - - bool allocate() { - - size_t word_size = MAX2(_rgen_allocations.get(), _freeblocks.MinWordSize); - MetaWord* p = _freeblocks.remove_block(word_size); - if (p != nullptr) { - _allocated_words.increment_by(word_size); - allocation_t* a = new allocation_t; - a->p = p; a->word_size = word_size; - a->next = _allocations; - _allocations = a; - DEBUG_ONLY(_freeblocks.verify();) - mark_range(p, word_size); - return true; - } - return false; - } - - void test_all_marked_ranges() { - for (allocation_t* a = _allocations; a != nullptr; a = a->next) { - check_marked_range(a->p, a->word_size); - } - } - - void test_loop() { - // We loop and in each iteration execute one of three operations: - // - allocation from fbl - // - deallocation to fbl of a previously allocated block - // - feeding a new larger block into the fbl (mimicks chunk retiring) - // When we have fed all large blocks into the fbl (feedbuffer empty), we - // switch to draining the fbl completely (only allocs) - bool forcefeed = false; - bool draining = false; - bool stop = false; - int iter = 25000; // safety stop - while (!stop && iter > 0) { - iter --; - int surprise = (int)os::random() % 10; - if (!draining && (surprise >= 7 || forcefeed)) { - forcefeed = false; - if (feed_some()) { - _num_feeds++; - } else { - // We fed all input memory into the fbl. Now lets proceed until the fbl is drained. 
- draining = true; - } - } else if (!draining && surprise < 1) { - deallocate_top(); - _num_deallocs++; - } else { - if (allocate()) { - _num_allocs++; - } else { - if (draining) { - stop = _freeblocks.total_size() < 512; - } else { - forcefeed = true; - } - } - } - if ((iter % 1000) == 0) { - DEBUG_ONLY(_freeblocks.verify();) - test_all_marked_ranges(); - LOG("a %d (" SIZE_FORMAT "), d %d, f %d", _num_allocs, _allocated_words.get(), _num_deallocs, _num_feeds); -#ifdef LOG_PLEASE - _freeblocks.print(tty, true); - tty->cr(); -#endif - } - } - - // Drain - - } - -public: - - FreeBlocksTest(size_t avg_alloc_size) : - _fb(512 * K), _freeblocks(), - _rgen_feeding(128, 4096), - _rgen_allocations(avg_alloc_size / 4, avg_alloc_size * 2, 0.01f, avg_alloc_size / 3, avg_alloc_size * 30), - _allocations(nullptr), - _num_allocs(0), - _num_deallocs(0), - _num_feeds(0) - { - CHECK_CONTENT(_freeblocks, 0, 0); - // some initial feeding - _freeblocks.add_block(_fb.get(1024), 1024); - CHECK_CONTENT(_freeblocks, 1, 1024); - } - - ~FreeBlocksTest() { - deallocate_all(); - } - - static void test_small_allocations() { - FreeBlocksTest test(10); - test.test_loop(); - } - - static void test_medium_allocations() { - FreeBlocksTest test(30); - test.test_loop(); - } - - static void test_large_allocations() { - FreeBlocksTest test(150); - test.test_loop(); - } - -}; - TEST_VM(metaspace, freeblocks_basics) { FreeBlocks fbl; MetaWord tmp[1024]; CHECK_CONTENT(fbl, 0, 0); - fbl.add_block(tmp, 1024); + MetaBlock bl(tmp, 1024); + fbl.add_block(bl); DEBUG_ONLY(fbl.verify();) ASSERT_FALSE(fbl.is_empty()); CHECK_CONTENT(fbl, 1, 1024); - MetaWord* p = fbl.remove_block(1024); - EXPECT_EQ(p, tmp); + MetaBlock bl2 = fbl.remove_block(1024); + ASSERT_EQ(bl, bl2); DEBUG_ONLY(fbl.verify();) CHECK_CONTENT(fbl, 0, 0); } - -TEST_VM(metaspace, freeblocks_small) { - FreeBlocksTest::test_small_allocations(); -} - -TEST_VM(metaspace, freeblocks_medium) { - FreeBlocksTest::test_medium_allocations(); -} - -TEST_VM(metaspace, freeblocks_large) { - FreeBlocksTest::test_large_allocations(); -} - diff --git a/test/hotspot/gtest/metaspace/test_metablock.cpp b/test/hotspot/gtest/metaspace/test_metablock.cpp new file mode 100644 index 00000000000..af103a2b462 --- /dev/null +++ b/test/hotspot/gtest/metaspace/test_metablock.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "memory/metaspace/metablock.inline.hpp" +//#define LOG_PLEASE +#include "metaspaceGtestCommon.hpp" + +using metaspace::MetaBlock; + + +#define CHECK_BLOCK_EMPTY(block) { \ + EXPECT_TRUE(block.is_empty()); \ + DEBUG_ONLY(block.verify()); \ +} + +#define CHECK_BLOCK(block, expected_base, expected_size) { \ + EXPECT_EQ(block.base(), (MetaWord*)expected_base); \ + EXPECT_EQ((size_t)expected_size, block.word_size()); \ + EXPECT_EQ(block.end(), expected_base + expected_size); \ + DEBUG_ONLY(block.verify()); \ +} + +static constexpr uintptr_t large_pointer = NOT_LP64(0x99999990) LP64_ONLY(0x9999999999999990ULL); + +TEST(metaspace, MetaBlock_1) { + MetaBlock bl; + CHECK_BLOCK_EMPTY(bl); +} + +TEST(metaspace, MetaBlock_2) { + MetaWord* const p = (MetaWord*)large_pointer; + constexpr size_t s = G; + MetaBlock bl(p, s); + CHECK_BLOCK(bl, p, s); +} + +TEST(metaspace, MetaBlock_3) { + MetaWord* const p = (MetaWord*)large_pointer; + MetaBlock bl(p, 0); + CHECK_BLOCK_EMPTY(bl); +} + +TEST_VM(metaspace, MetaBlock_4) { + MetaWord* const p = (MetaWord*)large_pointer; + MetaBlock bl(p, G); + CHECK_BLOCK(bl, p, G); + + MetaBlock bl_copy = bl, bl2; + + bl2 = bl.split_off_tail(M); + CHECK_BLOCK(bl, p, G - M); + CHECK_BLOCK(bl2, p + G - M, M); + + bl = bl_copy; + +bl.print_on(tty); +bl2.print_on(tty); + bl2 = bl.split_off_tail(G); + bl.print_on(tty); + bl2.print_on(tty); + + ASSERT_EQ(bl2, bl_copy); + ASSERT_TRUE(bl.is_empty()); + + bl = bl_copy; + + bl2 = bl.split_off_tail(0); + ASSERT_EQ(bl, bl_copy); + ASSERT_TRUE(bl2.is_empty()); + + MetaBlock empty; + bl = empty.split_off_tail(0); + ASSERT_TRUE(bl.is_empty()); +} diff --git a/test/hotspot/gtest/metaspace/test_metaspaceUtils.cpp b/test/hotspot/gtest/metaspace/test_metaspaceUtils.cpp index 9f6ac9e988a..8a46f71be5d 100644 --- a/test/hotspot/gtest/metaspace/test_metaspaceUtils.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspaceUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -28,7 +28,7 @@ #include "memory/metaspaceUtils.hpp" #include "unittest.hpp" -TEST_VM(MetaspaceUtils, reserved) { +TEST_VM(metaspace, MetaspaceUtils_reserved) { size_t reserved = MetaspaceUtils::reserved_bytes(); EXPECT_GT(reserved, 0UL); @@ -37,7 +37,7 @@ TEST_VM(MetaspaceUtils, reserved) { EXPECT_LE(reserved_metadata, reserved); } -TEST_VM(MetaspaceUtils, reserved_compressed_class_pointers) { +TEST_VM(metaspace, MetaspaceUtils_reserved_compressed_class_pointers) { if (!UseCompressedClassPointers) { return; } @@ -49,7 +49,7 @@ TEST_VM(MetaspaceUtils, reserved_compressed_class_pointers) { EXPECT_LE(reserved_class, reserved); } -TEST_VM(MetaspaceUtils, committed) { +TEST_VM(metaspace, MetaspaceUtils_committed) { size_t committed = MetaspaceUtils::committed_bytes(); EXPECT_GT(committed, 0UL); @@ -61,7 +61,7 @@ TEST_VM(MetaspaceUtils, committed) { EXPECT_LE(committed_metadata, committed); } -TEST_VM(MetaspaceUtils, committed_compressed_class_pointers) { +TEST_VM(metaspace, MetaspaceUtils_committed_compressed_class_pointers) { if (!UseCompressedClassPointers) { return; } @@ -73,7 +73,7 @@ TEST_VM(MetaspaceUtils, committed_compressed_class_pointers) { EXPECT_LE(committed_class, committed); } -TEST_VM(MetaspaceUtils, non_compressed_class_pointers) { +TEST_VM(metaspace, MetaspaceUtils_non_compressed_class_pointers) { if (UseCompressedClassPointers) { return; } @@ -99,7 +99,7 @@ static void check_metaspace_stats_are_not_null(const MetaspaceStats& stats) { EXPECT_GT(stats.used(), 0UL); } -TEST_VM(MetaspaceUtils, get_statistics) { +TEST_VM(MetaspaceUtils, MetaspaceUtils_get_statistics) { MetaspaceCombinedStats combined_stats = MetaspaceUtils::get_combined_statistics(); check_metaspace_stats_are_not_null(combined_stats); check_metaspace_stats_are_consistent(combined_stats); diff --git a/test/hotspot/gtest/metaspace/test_metaspacearena.cpp b/test/hotspot/gtest/metaspace/test_metaspacearena.cpp index 2e5a6d40ce7..2f7627ccffe 100644 --- a/test/hotspot/gtest/metaspace/test_metaspacearena.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspacearena.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2023 SAP SE. All rights reserved. + * Copyright (c) 2023 Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,98 +29,125 @@ #include "memory/metaspace/commitLimiter.hpp" #include "memory/metaspace/counters.hpp" #include "memory/metaspace/internalStats.hpp" +#include "memory/metaspace/freeBlocks.hpp" +#include "memory/metaspace/metablock.inline.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" +#include "memory/metaspace/metachunkList.hpp" #include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" +#include "memory/metaspace.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" -//#define LOG_PLEASE +#define LOG_PLEASE #include "metaspaceGtestCommon.hpp" #include "metaspaceGtestContexts.hpp" #include "metaspaceGtestRangeHelpers.hpp" -using metaspace::AllocationAlignmentByteSize; -using metaspace::ArenaGrowthPolicy; -using metaspace::CommitLimiter; -using metaspace::InternalStats; -using metaspace::MemRangeCounter; -using metaspace::MetaspaceArena; -using metaspace::SizeAtomicCounter; -using metaspace::Settings; -using metaspace::ArenaStats; +#define HANDLE_FAILURE \ + if (testing::Test::HasFailure()) { \ + return; \ + } + +namespace metaspace { + +class MetaspaceArenaTestFriend { + const MetaspaceArena* const _arena; +public: + MetaspaceArenaTestFriend(const MetaspaceArena* arena) : _arena(arena) {} + const MetachunkList& chunks() const { return _arena->_chunks; } + const FreeBlocks* fbl() const { return _arena->_fbl; } +}; class MetaspaceArenaTestHelper { MetaspaceGtestContext& _context; + const ArenaGrowthPolicy* const _growth_policy; - const ArenaGrowthPolicy* _growth_policy; - SizeAtomicCounter _used_words_counter; MetaspaceArena* _arena; - void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") { - _growth_policy = growth_policy; - _arena = new MetaspaceArena(&_context.cm(), _growth_policy, &_used_words_counter, name); - DEBUG_ONLY(_arena->verify()); - } - public: - // Create a helper; growth policy for arena is determined by the given spacetype|class tupel - MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, - Metaspace::MetaspaceType space_type, bool is_class, - const char* name = "gtest-MetaspaceArena") : - _context(helper) - { - initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name); - } - // Create a helper; growth policy is directly specified MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, const ArenaGrowthPolicy* growth_policy, - const char* name = "gtest-MetaspaceArena") : - _context(helper) + size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) : + _context(helper), _growth_policy(growth_policy), _arena(nullptr) { - initialize(growth_policy, name); + _arena = new MetaspaceArena(_context.context(), _growth_policy, allocation_alignment_words, "gtest-MetaspaceArena"); + DEBUG_ONLY(_arena->verify()); + _context.inc_num_arenas_created(); } + + // Create a helper; growth policy for arena is determined by the given spacetype|class tupel + MetaspaceArenaTestHelper(MetaspaceGtestContext& helper, + Metaspace::MetaspaceType space_type, bool is_class, + size_t allocation_alignment_words = Metaspace::min_allocation_alignment_words) : + MetaspaceArenaTestHelper(helper, ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), allocation_alignment_words) + {} + ~MetaspaceArenaTestHelper() { delete_arena_with_tests(); } - const 
CommitLimiter& limiter() const { return _context.commit_limiter(); } MetaspaceArena* arena() const { return _arena; } - SizeAtomicCounter& used_words_counter() { return _used_words_counter; } // Note: all test functions return void due to gtests limitation that we cannot use ASSERT // in non-void returning tests. void delete_arena_with_tests() { if (_arena != nullptr) { - size_t used_words_before = _used_words_counter.get(); - size_t committed_words_before = limiter().committed_words(); + size_t used_words_before = _context.used_words(); + size_t committed_words_before = _context.committed_words(); DEBUG_ONLY(_arena->verify()); delete _arena; _arena = nullptr; - size_t used_words_after = _used_words_counter.get(); - size_t committed_words_after = limiter().committed_words(); - ASSERT_0(used_words_after); + size_t used_words_after = _context.used_words(); + size_t committed_words_after = _context.committed_words(); + assert(_context.num_arenas_created() >= 1, "Sanity"); + if (_context.num_arenas_created() == 1) { + ASSERT_0(used_words_after); + } else { + ASSERT_LE(used_words_after, used_words_before); + } ASSERT_LE(committed_words_after, committed_words_before); } } void usage_numbers_with_test(size_t* p_used, size_t* p_committed, size_t* p_capacity) const { - _arena->usage_numbers(p_used, p_committed, p_capacity); - if (p_used != nullptr) { - if (p_committed != nullptr) { - ASSERT_GE(*p_committed, *p_used); - } - // Since we own the used words counter, it should reflect our usage number 1:1 - ASSERT_EQ(_used_words_counter.get(), *p_used); + size_t arena_used = 0, arena_committed = 0, arena_reserved = 0; + _arena->usage_numbers(&arena_used, &arena_committed, &arena_reserved); + EXPECT_GE(arena_committed, arena_used); + EXPECT_GE(arena_reserved, arena_committed); + + size_t context_used = _context.used_words(); + size_t context_committed = _context.committed_words(); + size_t context_reserved = _context.reserved_words(); + EXPECT_GE(context_committed, context_used); + EXPECT_GE(context_reserved, context_committed); + + // If only one arena uses the context, usage numbers must match. + if (_context.num_arenas_created() == 1) { + EXPECT_EQ(context_used, arena_used); + } else { + assert(_context.num_arenas_created() > 1, "Sanity"); + EXPECT_GE(context_used, arena_used); } - if (p_committed != nullptr && p_capacity != nullptr) { - ASSERT_GE(*p_capacity, *p_committed); + + // commit, reserve numbers don't have to match since free chunks may exist + EXPECT_GE(context_committed, arena_committed); + EXPECT_GE(context_reserved, arena_reserved); + + if (p_used) { + *p_used = arena_used; + } + if (p_committed) { + *p_committed = arena_committed; + } + if (p_capacity) { + *p_capacity = arena_reserved; } } @@ -142,23 +170,33 @@ public: ASSERT_NULL(dummy); } - // Allocate; it may or may not work; return value in *p_return_value void allocate_from_arena_with_tests(MetaWord** p_return_value, size_t word_size) { + MetaBlock result, wastage; + allocate_from_arena_with_tests(word_size, result, wastage); + if (wastage.is_nonempty()) { + _arena->deallocate(wastage); + wastage.reset(); + } + (*p_return_value) = result.base(); + } + + // Allocate; it may or may not work; return value in *p_return_value + void allocate_from_arena_with_tests(size_t word_size, MetaBlock& result, MetaBlock& wastage) { // Note: usage_numbers walks all chunks in use and counts. 
size_t used = 0, committed = 0, capacity = 0; usage_numbers_with_test(&used, &committed, &capacity); - size_t possible_expansion = limiter().possible_expansion_words(); + size_t possible_expansion = _context.commit_limiter().possible_expansion_words(); - MetaWord* p = _arena->allocate(word_size); + result = _arena->allocate(word_size, wastage); SOMETIMES(DEBUG_ONLY(_arena->verify();)) size_t used2 = 0, committed2 = 0, capacity2 = 0; usage_numbers_with_test(&used2, &committed2, &capacity2); - if (p == nullptr) { + if (result.is_empty()) { // Allocation failed. ASSERT_LT(possible_expansion, word_size); ASSERT_EQ(used, used2); @@ -166,7 +204,8 @@ public: ASSERT_EQ(capacity, capacity2); } else { // Allocation succeeded. Should be correctly aligned. - ASSERT_TRUE(is_aligned(p, AllocationAlignmentByteSize)); + ASSERT_TRUE(result.is_aligned_base(_arena->allocation_alignment_words())); + // used: may go up or may not (since our request may have been satisfied from the freeblocklist // whose content already counts as used). // committed: may go up, may not @@ -175,8 +214,6 @@ public: ASSERT_GE(committed2, committed); ASSERT_GE(capacity2, capacity); } - - *p_return_value = p; } // Allocate; it may or may not work; but caller does not care for the result value @@ -189,7 +226,7 @@ public: size_t used = 0, committed = 0, capacity = 0; usage_numbers_with_test(&used, &committed, &capacity); - _arena->deallocate(p, word_size); + _arena->deallocate(MetaBlock(p, word_size)); SOMETIMES(DEBUG_ONLY(_arena->verify();)) @@ -209,9 +246,13 @@ public: return stats; } + MetaspaceArenaTestFriend internal_access() const { + return MetaspaceArenaTestFriend (_arena); + } + // Convenience method to return number of chunks in arena (including current chunk) int get_number_of_chunks() const { - return get_arena_statistics().totals()._num; + return internal_access().chunks().count(); } }; @@ -391,23 +432,28 @@ TEST_VM(metaspace, MetaspaceArena_deallocate) { MetaWord* p1 = nullptr; helper.allocate_from_arena_with_tests_expect_success(&p1, s); + ASSERT_FALSE(HasFailure()); size_t used1 = 0, capacity1 = 0; helper.usage_numbers_with_test(&used1, nullptr, &capacity1); + ASSERT_FALSE(HasFailure()); ASSERT_EQ(used1, s); helper.deallocate_with_tests(p1, s); size_t used2 = 0, capacity2 = 0; helper.usage_numbers_with_test(&used2, nullptr, &capacity2); + ASSERT_FALSE(HasFailure()); ASSERT_EQ(used1, used2); ASSERT_EQ(capacity2, capacity2); MetaWord* p2 = nullptr; helper.allocate_from_arena_with_tests_expect_success(&p2, s); + ASSERT_FALSE(HasFailure()); size_t used3 = 0, capacity3 = 0; helper.usage_numbers_with_test(&used3, nullptr, &capacity3); + ASSERT_FALSE(HasFailure()); ASSERT_EQ(used3, used2); ASSERT_EQ(capacity3, capacity2); @@ -450,6 +496,7 @@ static void test_recover_from_commit_limit_hit() { helper1.allocate_from_arena_with_tests_expect_success(1); helper2.allocate_from_arena_with_tests_expect_success(1); allocated_from_1_and_2 += 2; + HANDLE_FAILURE } // Now, allocating from helper3, creep up on the limit @@ -494,11 +541,11 @@ static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class, // have different initial capacities. MetaspaceGtestContext context; - MetaspaceArenaTestHelper smhelper(context, type, is_class, "Grower"); + MetaspaceArenaTestHelper smhelper(context, type, is_class); const Metaspace::MetaspaceType other_type = (type == Metaspace::StandardMetaspaceType) ? 
Metaspace::ClassMirrorHolderMetaspaceType : Metaspace::StandardMetaspaceType; - MetaspaceArenaTestHelper smhelper_harrasser(context, other_type, true, "Harasser"); + MetaspaceArenaTestHelper smhelper_harrasser(context, other_type, true); size_t used = 0, committed = 0, capacity = 0; const size_t alloc_words = 16; @@ -561,12 +608,14 @@ static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class, } smhelper.allocate_from_arena_with_tests_expect_success(alloc_words); + HANDLE_FAILURE words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words); num_allocated++; size_t used2 = 0, committed2 = 0, capacity2 = 0; smhelper.arena()->usage_numbers(&used2, &committed2, &capacity2); + HANDLE_FAILURE // used should not grow larger than what we allocated, plus possible overhead. ASSERT_GE(used2, used); @@ -608,6 +657,10 @@ static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class, } + // No FBL should exist, we did not deallocate + ASSERT_EQ(smhelper.internal_access().fbl(), (FreeBlocks*)nullptr); + ASSERT_EQ(smhelper_harrasser.internal_access().fbl(), (FreeBlocks*)nullptr); + // After all this work, we should see an increase in number of chunk-in-place-enlargements // (this especially is vulnerable to regression: the decisions of when to do in-place-enlargements are somewhat // complicated, see MetaspaceArena::attempt_enlarge_current_chunk()) @@ -691,7 +744,8 @@ TEST_VM(metaspace, MetaspaceArena_growth_boot_nc_not_inplace) { // block should be reused by the next allocation). static void test_repeatedly_allocate_and_deallocate(bool is_topmost) { // Test various sizes, including (important) the max. possible block size = 1 root chunk - for (size_t blocksize = Metaspace::max_allocation_word_size(); blocksize >= 1; blocksize /= 2) { + for (size_t blocksize = Metaspace::max_allocation_word_size(); + blocksize >= Metaspace::min_allocation_word_size; blocksize /= 2) { size_t used1 = 0, used2 = 0, committed1 = 0, committed2 = 0; MetaWord* p = nullptr, *p2 = nullptr; @@ -703,6 +757,7 @@ static void test_repeatedly_allocate_and_deallocate(bool is_topmost) { if (!is_topmost) { // another one on top, size does not matter. helper.allocate_from_arena_with_tests_expect_success(0x10); + HANDLE_FAILURE } // Measure @@ -712,6 +767,7 @@ static void test_repeatedly_allocate_and_deallocate(bool is_topmost) { for (int i = 0; i < 5; i ++) { helper.deallocate_with_tests(p, blocksize); helper.allocate_from_arena_with_tests_expect_success(&p2, blocksize); + HANDLE_FAILURE // We should get the same pointer back. EXPECT_EQ(p2, p); } @@ -730,3 +786,120 @@ TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_top_al TEST_VM(metaspace, MetaspaceArena_test_repeatedly_allocate_and_deallocate_nontop_allocation) { test_repeatedly_allocate_and_deallocate(false); } + +static void test_random_aligned_allocation(size_t arena_alignment_words, SizeRange range) { + // We let the arena use 4K chunks, unless the alloc size is larger. + chunklevel_t level = CHUNK_LEVEL_4K; + const ArenaGrowthPolicy policy (&level, 1); + const size_t chunk_word_size = word_size_for_level(level); + + size_t expected_used = 0; + + MetaspaceGtestContext context; + MetaspaceArenaTestHelper helper(context, &policy, arena_alignment_words); + + size_t last_alloc_size = 0; + unsigned num_allocations = 0; + + const size_t max_used = MIN2(MAX2(chunk_word_size * 10, (range.highest() * 100)), + LP64_ONLY(64) NOT_LP64(16) * M); // word size! 
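+  // The loop below allocates until roughly max_used words have been handed out. Each round
+  // adds the requested size plus any wastage block the arena returned to expected_used and
+  // cross-checks that against the reported usage numbers; since nothing is deallocated,
+  // wastage can only stem from alignment gaps or from salvaging the committed remainder of
+  // a retired chunk, never from the free-block list.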
+ while (expected_used < max_used) { + + const int chunks_before = helper.get_number_of_chunks(); + + MetaBlock result, wastage; + size_t alloc_words = range.random_value(); + NOT_LP64(alloc_words = align_up(alloc_words, Metaspace::min_allocation_alignment_words)); + helper.allocate_from_arena_with_tests(alloc_words, result, wastage); + + ASSERT_TRUE(result.is_nonempty()); + ASSERT_TRUE(result.is_aligned_base(arena_alignment_words)); + ASSERT_EQ(result.word_size(), alloc_words); + + expected_used += alloc_words + wastage.word_size(); + const int chunks_now = helper.get_number_of_chunks(); + ASSERT_GE(chunks_now, chunks_before); + ASSERT_LE(chunks_now, chunks_before + 1); + + // Estimate wastage: + // Guessing at wastage is somewhat simple since we don't expect to ever use the fbl (we + // don't deallocate). Therefore, wastage can only be caused by alignment gap or by + // salvaging an old chunk before a new chunk is added. + const bool expect_alignment_gap = !is_aligned(last_alloc_size, arena_alignment_words); + const bool new_chunk_added = chunks_now > chunks_before; + + if (num_allocations == 0) { + // expect no wastage if its the first allocation in the arena + ASSERT_TRUE(wastage.is_empty()); + } else { + if (expect_alignment_gap) { + // expect wastage if the alignment requires it + ASSERT_TRUE(wastage.is_nonempty()); + } + } + + if (wastage.is_nonempty()) { + // If we have wastage, we expect it to be either too small or unaligned. That would not be true + // for wastage from the fbl, which could have any size; however, in this test we don't deallocate, + // so we don't expect wastage from the fbl. + if (wastage.is_aligned_base(arena_alignment_words)) { + ASSERT_LT(wastage.word_size(), alloc_words); + } + if (new_chunk_added) { + // chunk turnover: no more wastage than size of a commit granule, since we salvage the + // committed remainder of the old chunk. + ASSERT_LT(wastage.word_size(), Settings::commit_granule_words()); + } else { + // No chunk turnover: no more wastage than what alignment requires. 
+ ASSERT_LT(wastage.word_size(), arena_alignment_words); + } + } + + // Check stats too + size_t used, committed, reserved; + helper.usage_numbers_with_test(&used, &committed, &reserved); + ASSERT_EQ(used, expected_used); + + // No FBL should exist, we did not deallocate + ASSERT_EQ(helper.internal_access().fbl(), (FreeBlocks*)nullptr); + + HANDLE_FAILURE + + last_alloc_size = alloc_words; + num_allocations ++; + } + LOG("allocs: %u", num_allocations); +} + +#define TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(al) \ +TEST_VM(metaspace, MetaspaceArena_test_random_small_aligned_allocation_##al) { \ + static const SizeRange range(Metaspace::min_allocation_word_size, 128); \ + test_random_aligned_allocation(al, range); \ +} + +#ifdef _LP64 +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(1); +#endif +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(2); +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(8); +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(32); +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(128); +TEST_ARENA_WITH_ALIGNMENT_SMALL_RANGE(MIN_CHUNK_WORD_SIZE); + +#define TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(al) \ +TEST_VM(metaspace, MetaspaceArena_test_random_large_aligned_allocation_##al) { \ + static const SizeRange range(Metaspace::max_allocation_word_size() / 2, \ + Metaspace::max_allocation_word_size()); \ + test_random_aligned_allocation(al, range); \ +} + +#ifdef _LP64 +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(1); +#endif +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(2); +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(8); +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(32); +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(128); +TEST_ARENA_WITH_ALIGNMENT_LARGE_RANGE(MIN_CHUNK_WORD_SIZE); + +} // namespace metaspace diff --git a/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp b/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp index bb536dfd0e2..950f21fb92f 100644 --- a/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp @@ -26,8 +26,10 @@ #include "precompiled.hpp" #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metablock.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" +#include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" #include "runtime/mutexLocker.hpp" @@ -43,7 +45,9 @@ using metaspace::ArenaGrowthPolicy; using metaspace::ChunkManager; using metaspace::IntCounter; using metaspace::MemRangeCounter; +using metaspace::MetaBlock; using metaspace::MetaspaceArena; +using metaspace::MetaspaceContext; using metaspace::SizeAtomicCounter; using metaspace::ArenaStats; using metaspace::InUseChunkStats; @@ -124,16 +128,14 @@ public: MetaspaceArena* arena() { return _arena; } - MetaspaceArenaTestBed(ChunkManager* cm, const ArenaGrowthPolicy* alloc_sequence, - SizeAtomicCounter* used_words_counter, SizeRange allocation_range) : - _arena(nullptr), - _allocation_range(allocation_range), - _size_of_last_failed_allocation(0), - _allocations(nullptr), - _alloc_count(), - _dealloc_count() + MetaspaceArenaTestBed(MetaspaceContext* context, const ArenaGrowthPolicy* growth_policy, + size_t allocation_alignment_words, SizeRange allocation_range) + : _arena(nullptr) + , _allocation_range(allocation_range) + , _size_of_last_failed_allocation(0) + , _allocations(nullptr) { - _arena = new MetaspaceArena(cm, alloc_sequence, used_words_counter, "gtest-MetaspaceArenaTestBed-sm"); 
+ _arena = new MetaspaceArena(context, growth_policy, Metaspace::min_allocation_alignment_words, "gtest-MetaspaceArenaTestBed-sm"); } ~MetaspaceArenaTestBed() { @@ -163,13 +165,20 @@ public: // Allocate a random amount. Return false if the allocation failed. bool checked_random_allocate() { size_t word_size = 1 + _allocation_range.random_value(); - MetaWord* p = _arena->allocate(word_size); - if (p != nullptr) { - EXPECT_TRUE(is_aligned(p, AllocationAlignmentByteSize)); + MetaBlock wastage; + MetaBlock bl = _arena->allocate(word_size, wastage); + // We only expect wastage if either alignment was not met or the chunk remainder + // was not large enough. + if (wastage.is_nonempty()) { + _arena->deallocate(wastage); + wastage.reset(); + } + if (bl.is_nonempty()) { + EXPECT_TRUE(is_aligned(bl.base(), AllocationAlignmentByteSize)); allocation_t* a = NEW_C_HEAP_OBJ(allocation_t, mtInternal); a->word_size = word_size; - a->p = p; + a->p = bl.base(); a->mark(); a->next = _allocations; _allocations = a; @@ -193,7 +202,7 @@ public: } if (a != nullptr && a->p != nullptr) { a->verify(); - _arena->deallocate(a->p, a->word_size); + _arena->deallocate(MetaBlock(a->p, a->word_size)); _dealloc_count.add(a->word_size); a->p = nullptr; a->word_size = 0; if ((_dealloc_count.count() % 20) == 0) { @@ -218,8 +227,8 @@ class MetaspaceArenaTest { void create_new_test_bed_at(int slotindex, const ArenaGrowthPolicy* growth_policy, SizeRange allocation_range) { DEBUG_ONLY(_testbeds.check_slot_is_null(slotindex)); - MetaspaceArenaTestBed* bed = new MetaspaceArenaTestBed(&_context.cm(), growth_policy, - &_used_words_counter, allocation_range); + MetaspaceArenaTestBed* bed = new MetaspaceArenaTestBed(_context.context(), growth_policy, + Metaspace::min_allocation_alignment_words, allocation_range); _testbeds.set_at(slotindex, bed); _num_beds.increment(); } diff --git a/test/hotspot/gtest/oops/test_arrayOop.cpp b/test/hotspot/gtest/oops/test_arrayOop.cpp index e67e6e6c13b..5670aedafc1 100644 --- a/test/hotspot/gtest/oops/test_arrayOop.cpp +++ b/test/hotspot/gtest/oops/test_arrayOop.cpp @@ -82,7 +82,23 @@ TEST_VM(arrayOopDesc, narrowOop) { TEST_VM(arrayOopDesc, base_offset) { #ifdef _LP64 - if (UseCompressedClassPointers) { + if (UseCompactObjectHeaders) { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_CHAR), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_INT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_FLOAT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_LONG), 16); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_DOUBLE), 16); + if (UseCompressedOops) { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 12); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 12); + } else { + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_OBJECT), 16); + EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_ARRAY), 16); + } + } else if (UseCompressedClassPointers) { EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BOOLEAN), 16); EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_BYTE), 16); EXPECT_EQ(arrayOopDesc::base_offset_in_bytes(T_SHORT), 16); diff --git a/test/hotspot/gtest/oops/test_compressedKlass.cpp b/test/hotspot/gtest/oops/test_compressedKlass.cpp index b2fc5064581..48026f46852 100644 --- a/test/hotspot/gtest/oops/test_compressedKlass.cpp +++ b/test/hotspot/gtest/oops/test_compressedKlass.cpp @@ -36,6 +36,7 @@ 
TEST_VM(CompressedKlass, basics) { ASSERT_LE(CompressedKlassPointers::base(), CompressedKlassPointers::klass_range_start()); ASSERT_LT(CompressedKlassPointers::klass_range_start(), CompressedKlassPointers::klass_range_end()); ASSERT_LE(CompressedKlassPointers::klass_range_end(), CompressedKlassPointers::encoding_range_end()); + switch (CompressedKlassPointers::shift()) { case 0: ASSERT_EQ(CompressedKlassPointers::encoding_range_end() - CompressedKlassPointers::base(), (ptrdiff_t)(4 * G)); @@ -44,10 +45,23 @@ TEST_VM(CompressedKlass, basics) { ASSERT_EQ(CompressedKlassPointers::encoding_range_end() - CompressedKlassPointers::base(), (ptrdiff_t)(32 * G)); break; default: - ShouldNotReachHere(); + const size_t expected_size = nth_bit(CompressedKlassPointers::narrow_klass_pointer_bits() + CompressedKlassPointers::shift()); + ASSERT_EQ(CompressedKlassPointers::encoding_range_end() - CompressedKlassPointers::base(), (ptrdiff_t)expected_size); } } +TEST_VM(CompressedKlass, ccp_off) { + if (UseCompressedClassPointers) { + return; + } + ASSERT_EQ(CompressedKlassPointers::klass_range_start(), (address)nullptr); + ASSERT_EQ(CompressedKlassPointers::klass_range_end(), (address)nullptr); + // We should be able to call CompressedKlassPointers::is_encodable, and it should + // always return false + ASSERT_FALSE(CompressedKlassPointers::is_encodable((address)0x12345)); +} + + TEST_VM(CompressedKlass, test_too_low_address) { if (!UseCompressedClassPointers) { return; @@ -68,12 +82,29 @@ TEST_VM(CompressedKlass, test_too_high_address) { ASSERT_FALSE(CompressedKlassPointers::is_encodable(high)); } +TEST_VM(CompressedKlass, test_unaligned_address) { + if (!UseCompressedClassPointers) { + return; + } + const size_t alignment = CompressedKlassPointers::klass_alignment_in_bytes(); + address addr = CompressedKlassPointers::klass_range_start() + alignment - 1; + ASSERT_FALSE(CompressedKlassPointers::is_encodable(addr)); + // Try word-aligned, but not sufficiently aligned + if (alignment > BytesPerWord) { + addr = CompressedKlassPointers::klass_range_start() + BytesPerWord; + ASSERT_FALSE(CompressedKlassPointers::is_encodable(addr)); + } + addr = CompressedKlassPointers::klass_range_end() - 1; + ASSERT_FALSE(CompressedKlassPointers::is_encodable(addr)); +} + TEST_VM(CompressedKlass, test_good_address) { if (!UseCompressedClassPointers) { return; } + const size_t alignment = CompressedKlassPointers::klass_alignment_in_bytes(); address addr = CompressedKlassPointers::klass_range_start(); ASSERT_TRUE(CompressedKlassPointers::is_encodable(addr)); - addr = CompressedKlassPointers::klass_range_end() - 1; + addr = CompressedKlassPointers::klass_range_end() - alignment; ASSERT_TRUE(CompressedKlassPointers::is_encodable(addr)); } diff --git a/test/hotspot/gtest/oops/test_objArrayOop.cpp b/test/hotspot/gtest/oops/test_objArrayOop.cpp index 60cf6242dd5..deb4919ce46 100644 --- a/test/hotspot/gtest/oops/test_objArrayOop.cpp +++ b/test/hotspot/gtest/oops/test_objArrayOop.cpp @@ -28,29 +28,36 @@ TEST_VM(objArrayOop, osize) { static const struct { - int objal; bool ccp; bool coops; int result; + int objal; bool ccp; bool coops; bool coh; int result; } x[] = { -// ObjAligInB, UseCCP, UseCoops, object size in heap words +// ObjAligInB, UseCCP, UseCoops, UseCOH, object size in heap words #ifdef _LP64 - { 8, false, false, 4 }, // 20 byte header, 8 byte oops - { 8, false, true, 3 }, // 20 byte header, 4 byte oops - { 8, true, false, 3 }, // 16 byte header, 8 byte oops - { 8, true, true, 3 }, // 16 byte header, 4 byte oops - { 16, 
false, false, 4 }, // 20 byte header, 8 byte oops, 16-byte align - { 16, false, true, 4 }, // 20 byte header, 4 byte oops, 16-byte align - { 16, true, false, 4 }, // 16 byte header, 8 byte oops, 16-byte align - { 16, true, true, 4 }, // 16 byte header, 4 byte oops, 16-byte align - { 256, false, false, 32 }, // 20 byte header, 8 byte oops, 256-byte align - { 256, false, true, 32 }, // 20 byte header, 4 byte oops, 256-byte align - { 256, true, false, 32 }, // 16 byte header, 8 byte oops, 256-byte align - { 256, true, true, 32 }, // 16 byte header, 4 byte oops, 256-byte align + { 8, false, false, false, 4 }, // 20 byte header, 8 byte oops + { 8, false, true, false, 3 }, // 20 byte header, 4 byte oops + { 8, true, false, false, 3 }, // 16 byte header, 8 byte oops + { 8, true, true, false, 3 }, // 16 byte header, 4 byte oops + { 8, true, false, true, 3 }, // 12 byte header, 8 byte oops + { 8, true, true, true, 2 }, // 12 byte header, 4 byte oops + { 16, false, false, false, 4 }, // 20 byte header, 8 byte oops, 16-byte align + { 16, false, true, false, 4 }, // 20 byte header, 4 byte oops, 16-byte align + { 16, true, false, false, 4 }, // 16 byte header, 8 byte oops, 16-byte align + { 16, true, true, false, 4 }, // 16 byte header, 4 byte oops, 16-byte align + { 16, true, false, true, 4 }, // 12 byte header, 8 byte oops, 16-byte align + { 16, true, true, true, 2 }, // 12 byte header, 4 byte oops, 16-byte align + { 256, false, false, false, 32 }, // 20 byte header, 8 byte oops, 256-byte align + { 256, false, true, false, 32 }, // 20 byte header, 4 byte oops, 256-byte align + { 256, true, false, false, 32 }, // 16 byte header, 8 byte oops, 256-byte align + { 256, true, true, false, 32 }, // 16 byte header, 4 byte oops, 256-byte align + { 256, true, false, true, 32 }, // 12 byte header, 8 byte oops, 256-byte align + { 256, true, true, true, 32 }, // 12 byte header, 4 byte oops, 256-byte align #else - { 8, false, false, 4 }, // 12 byte header, 4 byte oops, wordsize 4 + { 8, false, false, false, 4 }, // 12 byte header, 4 byte oops, wordsize 4 #endif - { -1, false, false, -1 } + { -1, false, false, false, -1 } }; for (int i = 0; x[i].result != -1; i++) { - if (x[i].objal == (int)ObjectAlignmentInBytes && x[i].ccp == UseCompressedClassPointers && x[i].coops == UseCompressedOops) { + if (x[i].objal == (int)ObjectAlignmentInBytes && x[i].ccp == UseCompressedClassPointers && x[i].coops == UseCompressedOops && + x[i].coh == UseCompactObjectHeaders) { EXPECT_EQ(objArrayOopDesc::object_size(1), (size_t)x[i].result); } } diff --git a/test/hotspot/gtest/oops/test_typeArrayOop.cpp b/test/hotspot/gtest/oops/test_typeArrayOop.cpp index a7565a23d58..b8c55867077 100644 --- a/test/hotspot/gtest/oops/test_typeArrayOop.cpp +++ b/test/hotspot/gtest/oops/test_typeArrayOop.cpp @@ -36,7 +36,11 @@ TEST_VM(typeArrayOopDesc, bool_at_put) { char* addr = align_up(mem, 16); typeArrayOop o = (typeArrayOop) cast_to_oop(addr); - o->set_klass(Universe::boolArrayKlass()); + if (UseCompactObjectHeaders) { + o->set_mark(Universe::boolArrayKlass()->prototype_header()); + } else { + o->set_klass(Universe::boolArrayKlass()); + } o->set_length(10); diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt index 000f26d71c6..ab4b44c9dd0 100644 --- a/test/hotspot/jtreg/ProblemList.txt +++ b/test/hotspot/jtreg/ProblemList.txt @@ -114,6 +114,10 @@ runtime/StackGuardPages/TestStackGuardPagesNative.java 8303612 linux-all runtime/ErrorHandling/MachCodeFramesInErrorFile.java 8313315 linux-ppc64le 
runtime/cds/appcds/customLoader/HelloCustom_JFR.java 8241075 linux-all,windows-x64 +# Fails with +UseCompactObjectHeaders on aarch64 +runtime/cds/appcds/SharedBaseAddress.java 8340212 linux-aarch64,macosx-aarch64 +runtime/cds/SharedBaseAddress.java 8340212 linux-aarch64,macosx-aarch64 + applications/jcstress/copy.java 8229852 linux-all containers/docker/TestJcmd.java 8278102 linux-all diff --git a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java index 2d17753ba94..b511476bf52 100644 --- a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java +++ b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationMismatchedAccess.java @@ -153,6 +153,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: offsets are badly aligned (UNSAFE.ARRAY_BYTE_BASE_OFFSET is 4 byte aligned, but not 8 byte aligned). @@ -165,6 +167,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. @@ -176,6 +180,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}) public static void testByteLong1c(byte[] dest, long[] src) { long base = 64; // make sure it is big enough and 8 byte aligned (required for 32-bit) @@ -186,6 +192,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. @@ -235,6 +243,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: offsets are badly aligned (UNSAFE.ARRAY_BYTE_BASE_OFFSET is 4 byte aligned, but not 8 byte aligned). @@ -247,6 +257,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. 
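The applyIf = { "UseCompactObjectHeaders", "false" } preconditions added throughout TestVectorizationMismatchedAccess.java switch these IR rules off when compact object headers are enabled; the in-line comments attribute the failures to UseSSE<=3. A plausible reading, consistent with the arrayOopDesc expectations earlier in this patch, is that the byte-array payload moves from offset 16 to offset 12, so the 8-byte accesses these loops perform relative to ARRAY_BYTE_BASE_OFFSET lose their natural alignment, and with only SSE2/SSE3 available the loops no longer vectorize, which breaks the expected IR counts. The offset change itself is easy to observe with a stand-alone probe (not part of this patch; the class name is invented, and it needs --add-exports java.base/jdk.internal.misc=ALL-UNNAMED at compile and run time):

    import jdk.internal.misc.Unsafe;

    public class ByteArrayBaseOffsetProbe {
        public static void main(String[] args) {
            // 16 with the legacy 64-bit layout (compressed class pointers), 12 with -XX:+UseCompactObjectHeaders.
            System.out.println("byte[] base offset = " + Unsafe.ARRAY_BYTE_BASE_OFFSET);
            // Stays at 16 either way, because long elements keep their 8-byte alignment.
            System.out.println("long[] base offset = " + Unsafe.ARRAY_LONG_BASE_OFFSET);
        }
    }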
@@ -298,6 +310,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: offsets are badly aligned (UNSAFE.ARRAY_BYTE_BASE_OFFSET is 4 byte aligned, but not 8 byte aligned). @@ -310,6 +324,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. @@ -328,6 +344,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: offsets are badly aligned (UNSAFE.ARRAY_BYTE_BASE_OFFSET is 4 byte aligned, but not 8 byte aligned). @@ -340,6 +358,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. @@ -357,6 +377,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: offsets are badly aligned (UNSAFE.ARRAY_BYTE_BASE_OFFSET is 4 byte aligned, but not 8 byte aligned). @@ -369,6 +391,8 @@ public class TestVectorizationMismatchedAccess { @Test @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"}, applyIfPlatform = {"64-bit", "true"}) // 32-bit: address has ConvL2I for cast of long to address, not supported. diff --git a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java index 5968b7221c7..d61b8c658d6 100644 --- a/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java +++ b/test/hotspot/jtreg/compiler/c2/irTests/TestVectorizationNotRun.java @@ -51,7 +51,9 @@ public class TestVectorizationNotRun { static long[] longArray = new long[size]; @Test - @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }) + @IR(counts = { IRNode.LOAD_VECTOR_L, ">=1", IRNode.STORE_VECTOR, ">=1" }, + // This test fails with compact headers, but only with UseSSE<=3. 
+ applyIf = { "UseCompactObjectHeaders", "false" }) public static void test(byte[] dest, long[] src) { for (int i = 0; i < src.length; i++) { if ((i < 0) || (8 > sizeBytes - i)) { diff --git a/test/hotspot/jtreg/compiler/intrinsics/bmi/BMITestRunner.java b/test/hotspot/jtreg/compiler/intrinsics/bmi/BMITestRunner.java index 159e471e3fc..b005d767287 100644 --- a/test/hotspot/jtreg/compiler/intrinsics/bmi/BMITestRunner.java +++ b/test/hotspot/jtreg/compiler/intrinsics/bmi/BMITestRunner.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,6 +122,10 @@ public class BMITestRunner { List vmOpts = new LinkedList(); Collections.addAll(vmOpts, additionalVMOpts); + // Hide timestamps from warnings (e.g. due to potential CDS + // saved/runtime state mismatch), to avoid false positives when + // comparing output across runs. + vmOpts.add("-Xlog:all=warning:stdout:level,tags"); //setup mode-specific options switch (testVMMode) { diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java index d477aa44763..caef911f73a 100644 --- a/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java +++ b/test/hotspot/jtreg/compiler/lib/ir_framework/TestFramework.java @@ -142,7 +142,8 @@ public class TestFramework { "UseZbb", "UseRVV", "Xlog", - "LogCompilation" + "LogCompilation", + "UseCompactObjectHeaders" ) ); diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java index c77f4f6fa2e..efd328dc5cc 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestAlignVector.java @@ -398,6 +398,7 @@ public class TestAlignVector { @IR(counts = {IRNode.LOAD_VECTOR_B, "> 0", IRNode.AND_VB, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test1(byte[] a, byte[] b, byte mask) { @@ -706,7 +707,7 @@ public class TestAlignVector { @IR(counts = {IRNode.LOAD_VECTOR_S, IRNode.VECTOR_SIZE_4, "> 0", IRNode.AND_VS, IRNode.VECTOR_SIZE_4, "> 0", IRNode.STORE_VECTOR, "> 0"}, - applyIf = {"MaxVectorSize", ">=16"}, + applyIfAnd = {"MaxVectorSize", ">=16", "UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test10d(short[] a, short[] b, short mask) { @@ -1001,6 +1002,7 @@ public class TestAlignVector { IRNode.ADD_VB, "> 0", IRNode.ADD_VI, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aIB(int[] a, byte[] b) { @@ -1017,6 +1019,7 @@ public class TestAlignVector { IRNode.ADD_VI, "> 0", IRNode.ADD_VS, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aIS(int[] a, short[] b) { @@ -1037,6 +1040,7 @@ public class TestAlignVector { IRNode.ADD_VI, "> 0", IRNode.ADD_VL, "> 0", 
IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13aBSIL(byte[] a, short[] b, int[] c, long[] d) { @@ -1072,6 +1076,7 @@ public class TestAlignVector { IRNode.ADD_VB, "> 0", IRNode.ADD_VI, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bIB(int[] a, byte[] b) { @@ -1088,6 +1093,7 @@ public class TestAlignVector { IRNode.ADD_VI, "> 0", IRNode.ADD_VS, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bIS(int[] a, short[] b) { @@ -1108,6 +1114,7 @@ public class TestAlignVector { IRNode.ADD_VI, "> 0", IRNode.ADD_VL, "> 0", IRNode.STORE_VECTOR, "> 0"}, + applyIf = {"UseCompactObjectHeaders", "false"}, applyIfPlatform = {"64-bit", "true"}, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"}) static Object[] test13bBSIL(byte[] a, short[] b, int[] c, long[] d) { diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java index fb99fc5983a..9aaa7cdd8a9 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestMulAddS2I.java @@ -163,11 +163,13 @@ public class TestMulAddS2I { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false"}, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testd(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -181,11 +183,13 @@ public class TestMulAddS2I { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] teste(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -199,11 +203,13 @@ public class TestMulAddS2I { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = 
{"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testf(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -217,11 +223,13 @@ public class TestMulAddS2I { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testg(int[] out) { for (int i = 0; i < ITER-2; i+=2) { @@ -235,11 +243,13 @@ public class TestMulAddS2I { @Test @IR(applyIfCPUFeature = {"sse2", "true"}, applyIfPlatform = {"64-bit", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"asimd", "true"}, - applyIf = {"MaxVectorSize", "16"}, // AD file requires vector_length = 16 + applyIfAnd = {"MaxVectorSize", "16", "UseCompactObjectHeaders", "false" }, // AD file requires vector_length = 16 counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI, "> 0"}) @IR(applyIfCPUFeature = {"avx512_vnni", "true"}, + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.MUL_ADD_S2I, "> 0", IRNode.MUL_ADD_VS2VI_VNNI, "> 0"}) public static int[] testh(int[] out) { for (int i = 0; i < ITER-2; i+=2) { diff --git a/test/hotspot/jtreg/compiler/vectorization/runner/LoopCombinedOpTest.java b/test/hotspot/jtreg/compiler/vectorization/runner/LoopCombinedOpTest.java index cddc5207d42..16d04102082 100644 --- a/test/hotspot/jtreg/compiler/vectorization/runner/LoopCombinedOpTest.java +++ b/test/hotspot/jtreg/compiler/vectorization/runner/LoopCombinedOpTest.java @@ -1,6 +1,6 @@ /* * Copyright (c) 2022, 2023, Arm Limited. All rights reserved. - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -223,6 +223,8 @@ public class LoopCombinedOpTest extends VectorizationTestRunner { @Test @IR(applyIfCPUFeatureOr = {"asimd", "true", "sse2", "true"}, + // This test fails with compact headers, but only with UseSSE<=3. + applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.STORE_VECTOR, ">0"}) public int[] multipleOpsWith2DifferentTypesAndInvariant() { short[] res1 = new short[SIZE]; @@ -236,6 +238,8 @@ public class LoopCombinedOpTest extends VectorizationTestRunner { @Test @IR(applyIfCPUFeatureOr = {"asimd", "true", "sse2", "true"}, + // This test fails with compact headers, but only with UseSSE<=3. 
+ applyIf = { "UseCompactObjectHeaders", "false" }, counts = {IRNode.STORE_VECTOR, ">0"}) public int[] multipleOpsWith2DifferentTypesAndComplexExpression() { short[] res1 = new short[SIZE]; diff --git a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java index d37bf567381..2a7886a2106 100644 --- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java +++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -259,7 +259,6 @@ public class TestGCLogMessages { LogMessageWithLevel exhFailureMessages[] = new LogMessageWithLevel[] { new LogMessageWithLevel("Recalculate Used Memory \\(ms\\):", Level.DEBUG), - new LogMessageWithLevel("Restore Preserved Marks \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Restore Evacuation Failed Regions \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Process Evacuation Failed Regions \\(ms\\):", Level.DEBUG), new LogMessageWithLevel("Evacuation Failed Regions:", Level.DEBUG), diff --git a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java index 2e7ebc2370f..88d11c10fa0 100644 --- a/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java +++ b/test/hotspot/jtreg/gc/g1/plab/TestPLABPromotion.java @@ -32,7 +32,7 @@ * @modules java.management * @build jdk.test.whitebox.WhiteBox * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox - * @run main/timeout=240 gc.g1.plab.TestPLABPromotion + * @run main/othervm/timeout=240 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI gc.g1.plab.TestPLABPromotion */ package gc.g1.plab; @@ -48,12 +48,15 @@ import gc.g1.plab.lib.PlabInfo; import jdk.test.lib.Platform; import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; +import jdk.test.whitebox.WhiteBox; /** * Test checks PLAB promotion of different size objects. */ public class TestPLABPromotion { + private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); + // GC ID with survivor PLAB statistics private final static long GC_ID_SURVIVOR_STATS = 1l; // GC ID with old PLAB statistics @@ -74,7 +77,7 @@ public class TestPLABPromotion { private static final int PLAB_SIZE_HIGH = 65536; private static final int OBJECT_SIZE_SMALL = 10 * HEAP_WORD_SIZE; private static final int OBJECT_SIZE_MEDIUM = 128 * HEAP_WORD_SIZE; - private static final int OBJECT_SIZE_HIGH = 3072 * HEAP_WORD_SIZE; + private static final int OBJECT_SIZE_HIGH = (COMPACT_HEADERS ? 
3266 : 3250) * HEAP_WORD_SIZE; private static final int GC_NUM_SMALL = 1; private static final int GC_NUM_MEDIUM = 3; private static final int GC_NUM_HIGH = 7; diff --git a/test/hotspot/jtreg/gtest/CompressedKlassGtest.java b/test/hotspot/jtreg/gtest/CompressedKlassGtest.java index e6377a8570d..fce30285312 100644 --- a/test/hotspot/jtreg/gtest/CompressedKlassGtest.java +++ b/test/hotspot/jtreg/gtest/CompressedKlassGtest.java @@ -35,5 +35,26 @@ * @library /test/lib * @modules java.base/jdk.internal.misc * java.xml - * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=128m + * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:+UnlockExperimentalVMOptions -XX:-UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=128m + */ + +/* @test id=ccp_off + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.xml + * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:-UseCompressedClassPointers -Xlog:metaspace* -Xmx6g -Xms128m + */ + +/* @test id=use-zero-based-encoding-coh + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.xml + * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:+UnlockExperimentalVMOptions -XX:+UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=128m + */ + +/* @test id=use-zero-based-encoding-coh-large-class-space + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.xml + * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:+UnlockExperimentalVMOptions -XX:+UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=4g */ diff --git a/test/hotspot/jtreg/gtest/MetaspaceGtests.java b/test/hotspot/jtreg/gtest/MetaspaceGtests.java index f1f811d6a71..fac88588e31 100644 --- a/test/hotspot/jtreg/gtest/MetaspaceGtests.java +++ b/test/hotspot/jtreg/gtest/MetaspaceGtests.java @@ -49,3 +49,14 @@ * @requires vm.flagless * @run main/native GTestWrapper --gtest_filter=metaspace* -XX:+UnlockDiagnosticVMOptions -XX:-UseCompressedClassPointers */ + +/* @test id=UseCompactObjectHeaders + * @summary Run metaspace-related gtests with tiny classpointers + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.xml + * @requires vm.bits == 64 + * @requires vm.flagless + * @requires vm.debug + * @run main/native GTestWrapper --gtest_filter=metaspace* -XX:+UnlockExperimentalVMOptions -XX:+UseCompactObjectHeaders + */ diff --git a/test/hotspot/jtreg/gtest/MetaspaceUtilsGtests.java b/test/hotspot/jtreg/gtest/MetaspaceUtilsGtests.java index 7a9b1c8dfb3..e69de29bb2d 100644 --- a/test/hotspot/jtreg/gtest/MetaspaceUtilsGtests.java +++ b/test/hotspot/jtreg/gtest/MetaspaceUtilsGtests.java @@ -1,40 +0,0 @@ -/* - * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Note: This runs the metaspace utils related parts of gtest in configurations which - * are not tested explicitly in the standard gtests. - * - */ - -/* @test - * @bug 8264008 - * @summary Run metaspace utils related gtests with compressed class pointers off - * @requires vm.bits == 64 - * @library /test/lib - * @modules java.base/jdk.internal.misc - * java.xml - * @requires vm.flagless - * @run main/native GTestWrapper --gtest_filter=MetaspaceUtils* -XX:-UseCompressedClassPointers - */ diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedCPUSpecificClassSpaceReservation.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedCPUSpecificClassSpaceReservation.java index f1b4c7143b4..574a680aceb 100644 --- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedCPUSpecificClassSpaceReservation.java +++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedCPUSpecificClassSpaceReservation.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,6 +56,7 @@ public class CompressedCPUSpecificClassSpaceReservation { "-Xshare:" + (CDS ? "on" : "off"), "-Xmx128m", "-XX:CompressedClassSpaceSize=128m", + "-XX:+UnlockExperimentalVMOptions", "-XX:-UseCompactObjectHeaders", "-Xlog:metaspace*", "-Xlog:metaspace+map=trace", "-Xlog:os+map=trace", "-XX:+SimulateFullAddressSpace", // So that no resevation attempt will succeed "-version"); diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java index 4feadfb5565..e4b6f2a0548 100644 --- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java +++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,7 @@ public class CompressedClassPointers { static final String logging_option = "-Xlog:gc+metaspace=trace,metaspace=info,cds=trace"; static final String reserveCCSAnywhere = "Reserving compressed class space anywhere"; + static final String usesCompactObjectHeadersPat = "UseCompactObjectHeaders 1"; // Returns true if we are to test the narrow klass base; we only do this on // platforms where we can be reasonably shure that we get reproducable placement). 
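A minimal sketch of how the predicate added in the next hunk is meant to be consumed; the wrapper name assertZeroNarrowKlassBase is invented purely for illustration, while isCCSReservedAnywhere and usesCompactObjectHeaders are the helpers this test actually defines:

    // Only assert a zero-based narrow Klass base when neither "reserved anywhere" placement
    // nor compact object headers (which typically yield a non-zero encoding base) are in effect.
    static void assertZeroNarrowKlassBase(OutputAnalyzer output) {
        if (!isCCSReservedAnywhere(output) && !usesCompactObjectHeaders(output)) {
            output.shouldContain("Narrow klass base: 0x0000000000000000");
        }
    }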
@@ -57,6 +58,11 @@ public class CompressedClassPointers { } + // Returns true if the output indicates that the VM uses compact object headers + static boolean usesCompactObjectHeaders(OutputAnalyzer output) { + return output.getOutput().contains(usesCompactObjectHeadersPat); + } + // Returns true if the output indicates that the ccs is reserved anywhere. static boolean isCCSReservedAnywhere(OutputAnalyzer output) { if (output.getOutput().contains(reserveCCSAnywhere)) { @@ -221,7 +227,7 @@ public class CompressedClassPointers { "-Xlog:cds=trace", "-XX:+VerifyBeforeGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - if (!isCCSReservedAnywhere(output)) { + if (!isCCSReservedAnywhere(output) && !usesCompactObjectHeaders(output)) { output.shouldContain("Narrow klass base: 0x0000000000000000"); } output.shouldHaveExitValue(0); @@ -239,10 +245,10 @@ public class CompressedClassPointers { "-Xlog:cds=trace", "-XX:+VerifyBeforeGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - if (!isCCSReservedAnywhere(output)) { + if (!isCCSReservedAnywhere(output) && !usesCompactObjectHeaders(output)) { output.shouldContain("Narrow klass base: 0x0000000000000000"); } - if (!Platform.isAArch64() && !Platform.isPPC()) { + if (!Platform.isAArch64() && !usesCompactObjectHeaders(output) && !Platform.isPPC()) { // Currently relax this test for Aarch64 and ppc. output.shouldContain("Narrow klass shift: 0"); } @@ -261,10 +267,10 @@ public class CompressedClassPointers { "-Xlog:cds=trace", "-XX:+VerifyBeforeGC", "-version"); OutputAnalyzer output = new OutputAnalyzer(pb.start()); - if (!isCCSReservedAnywhere(output)) { + if (!isCCSReservedAnywhere(output) && !usesCompactObjectHeaders(output)) { output.shouldContain("Narrow klass base: 0x0000000000000000"); } - if (!Platform.isAArch64() && !Platform.isPPC()) { + if (!Platform.isAArch64() && !usesCompactObjectHeaders(output) && !Platform.isPPC()) { // Currently relax this test for Aarch64 and ppc. output.shouldContain("Narrow klass shift: 0"); } diff --git a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointersEncodingScheme.java b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointersEncodingScheme.java index 070c856a322..665c4cb8b9f 100644 --- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointersEncodingScheme.java +++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointersEncodingScheme.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,13 +42,16 @@ import java.io.IOException; public class CompressedClassPointersEncodingScheme { - private static void test(long forceAddress, long classSpaceSize, long expectedEncodingBase, int expectedEncodingShift) throws IOException { + private static void test(long forceAddress, boolean COH, long classSpaceSize, long expectedEncodingBase, int expectedEncodingShift) throws IOException { String forceAddressString = String.format("0x%016X", forceAddress).toLowerCase(); String expectedEncodingBaseString = String.format("0x%016X", expectedEncodingBase).toLowerCase(); ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( "-Xshare:off", // to make CompressedClassSpaceBaseAddress work "-XX:+UnlockDiagnosticVMOptions", "-XX:-UseCompressedOops", // keep VM from optimizing heap location + "-XX:+UnlockExperimentalVMOptions", + "-XX:" + (COH ? "+" : "-") + "UseCompactObjectHeaders", + "-XX:" + (COH ? "+" : "-") + "UseObjectMonitorTable", "-XX:CompressedClassSpaceBaseAddress=" + forceAddress, "-XX:CompressedClassSpaceSize=" + classSpaceSize, "-Xmx128m", @@ -60,7 +63,8 @@ public class CompressedClassPointersEncodingScheme { // We ignore cases where we were not able to map at the force address if (output.contains("reserving class space failed")) { - throw new SkippedException("Skipping because we cannot force ccs to " + forceAddressString); + System.out.println("Skipping because we cannot force ccs to " + forceAddressString); + return; } output.shouldHaveExitValue(0); @@ -73,9 +77,51 @@ public class CompressedClassPointersEncodingScheme { public static void main(String[] args) throws Exception { // Test ccs nestling right at the end of the 4G range // Expecting base=0, shift=0 - test(4 * G - 128 * M, 128 * M, 0, 0); + test(4 * G - 128 * M, false, 128 * M, 0, 0); + // Test ccs nestling right at the end of the 32G range + // Expecting: + // - non-aarch64: base=0, shift=3 + // - aarch64: base to start of class range, shift 0 + if (Platform.isAArch64()) { + // The best we can do on aarch64 is to be *near* the end of the 32g range, since a valid encoding base + // on aarch64 must be 4G aligned, and the max. class space size is 3G. + long forceAddress = 0x7_0000_0000L; // 28g, and also a valid EOR immediate + test(forceAddress, false, 3 * G, forceAddress, 0); + } else { + test(32 * G - 128 * M, false, 128 * M, 0, 3); + } + + // Test ccs starting *below* 4G, but extending upwards beyond 4G. All platforms except aarch64 should pick + // zero based encoding. On aarch64, this test is excluded since the only valid mode would be XOR, but the bit + // pattern of the base and the bit pattern of the shifted class pointers would overlap. + if (!Platform.isAArch64()) { + test(4 * G - 128 * M, false, 2 * 128 * M, 0, 3); + } // add more... + // Compact Object Header Mode: + // On aarch64 and x64 we expect the VM to choose the smallest possible shift value needed to cover + // the encoding range. We expect the encoding Base to start at the class space start - but to enforce that, + // we choose a high address.
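As a reading aid, the expectations encoded by the compact-object-header calls that follow, collected in one hypothetical helper (a sketch only, not a method of this test; M and G are the size constants the test already uses):

    // Shift the test expects the VM to pick for a given class space size when
    // +UseCompactObjectHeaders is set and the class space is forced to a high base (32 * G).
    static int expectedCohShift(long ccsSize) {
        if (ccsSize == 128 * M) return 6;
        if (ccsSize == 512 * M) return 8;
        if (ccsSize == G)       return 9;
        if (ccsSize == 3 * G)   return 10;
        throw new IllegalArgumentException("no expectation recorded for " + ccsSize);
    }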
+ if (Platform.isAArch64() || Platform.isX64() || Platform.isRISCV64()) { + long forceAddress = 32 * G; + + long ccsSize = 128 * M; + int expectedShift = 6; + test(forceAddress, true, ccsSize, forceAddress, expectedShift); + + ccsSize = 512 * M; + expectedShift = 8; + test(forceAddress, true, ccsSize, forceAddress, expectedShift); + + ccsSize = G; + expectedShift = 9; + test(forceAddress, true, ccsSize, forceAddress, expectedShift); + + ccsSize = 3 * G; + expectedShift = 10; + test(forceAddress, true, ccsSize, forceAddress, expectedShift); + } } } diff --git a/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java index b679e866ac8..e69de29bb2d 100644 --- a/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java +++ b/test/hotspot/jtreg/runtime/FieldLayout/ArrayBaseOffsets.java @@ -1,113 +0,0 @@ -/* - * Copyright Amazon.com Inc. or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -/* - * @test id=with-coops-no-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:+UseCompressedOops -XX:-UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=with-coops-with-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @requires vm.opt.UseCompressedClassPointers != false - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:+UseCompressedOops -XX:+UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=no-coops-no-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:-UseCompressedOops -XX:-UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=no-coops-with-ccp - * @library /test/lib - * @requires vm.bits == "64" - * @requires vm.opt.UseCompressedClassPointers != false - * @modules java.base/jdk.internal.misc - * @run main/othervm -XX:-UseCompressedOops -XX:+UseCompressedClassPointers ArrayBaseOffsets - */ -/* - * @test id=32bit - * @library /test/lib - * @requires vm.bits == "32" - * @modules java.base/jdk.internal.misc - * @run main/othervm ArrayBaseOffsets - */ - -import jdk.internal.misc.Unsafe; - -import java.lang.management.ManagementFactory; -import java.lang.management.RuntimeMXBean; -import java.util.List; - -import jdk.test.lib.Asserts; -import jdk.test.lib.Platform; - -public class ArrayBaseOffsets { - - private static final boolean COOP; - private static final boolean CCP; - - static { - if (Platform.is64bit()) { - RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); - List vmargs = runtime.getInputArguments(); - CCP = !vmargs.contains("-XX:-UseCompressedClassPointers"); - COOP = System.getProperty("java.vm.compressedOopsMode") != null; - } else { - COOP = CCP = false; - } - } - - static public void main(String[] args) { - Unsafe unsafe = Unsafe.getUnsafe(); - int intOffset, longOffset; - if (Platform.is64bit()) { - if (CCP) { - intOffset = 16; - longOffset = 16; - } else { - intOffset = 20; - longOffset = 24; - } - } else { - intOffset = 12; - longOffset = 16; - } - Asserts.assertEquals(unsafe.arrayBaseOffset(boolean[].class), intOffset, "Misplaced boolean array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(byte[].class), intOffset, "Misplaced byte array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(char[].class), intOffset, "Misplaced char array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(short[].class), intOffset, "Misplaced short array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(int[].class), intOffset, "Misplaced int array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(long[].class), longOffset, "Misplaced long array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(float[].class), intOffset, "Misplaced float array base"); - Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), longOffset, "Misplaced double array base"); - int expectedObjArrayOffset = (COOP || !Platform.is64bit()) ? intOffset : longOffset; - Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expectedObjArrayOffset, "Misplaced object array base"); - } -} diff --git a/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java new file mode 100644 index 00000000000..e4f88b5d8d5 --- /dev/null +++ b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test id=with-coops-with-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-with-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:+UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=with-coops-no-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:-UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-no-ccp + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:-UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=with-coop--with-coh + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+UseCompressedOops -XX:+UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=no-coops-with-coh + * @library /test/lib / + * @requires vm.bits == "64" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:-UseCompressedOops -XX:+UseCompactObjectHeaders BaseOffsets + */ +/* + * @test id=32bit + * @library /test/lib / + * @requires vm.bits == "32" + * @modules java.base/jdk.internal.misc + * java.management + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BaseOffsets + */ + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Comparator; +import jdk.internal.misc.Unsafe; + +import jdk.test.lib.Asserts; +import jdk.test.lib.Platform; +import jdk.test.whitebox.WhiteBox; + +public class BaseOffsets { + + static class LIClass { + public int i; + } + + public static final WhiteBox WB = WhiteBox.getWhiteBox(); + + static final long INT_OFFSET; + static final int INT_ARRAY_OFFSET; + static final int LONG_ARRAY_OFFSET; + static { + if (!Platform.is64bit() || WB.getBooleanVMFlag("UseCompactObjectHeaders")) { + INT_OFFSET = 8; + INT_ARRAY_OFFSET = 12; + LONG_ARRAY_OFFSET = 16; + } else if (WB.getBooleanVMFlag("UseCompressedClassPointers")) { + INT_OFFSET = 12; + INT_ARRAY_OFFSET = 16; + LONG_ARRAY_OFFSET = 16; + } else { + INT_OFFSET = 16; + INT_ARRAY_OFFSET = 20; + LONG_ARRAY_OFFSET = 24; + } + } + + static public void main(String[] args) { + Unsafe unsafe = Unsafe.getUnsafe(); + Class c = LIClass.class; + Field[] fields = c.getFields(); + for (int i = 0; i < fields.length; i++) { + long offset = unsafe.objectFieldOffset(fields[i]); + if (fields[i].getType() == int.class) { + Asserts.assertEquals(offset, INT_OFFSET, "Misplaced int field"); + } else { + Asserts.fail("Unexpected field type"); + } + } + + Asserts.assertEquals(unsafe.arrayBaseOffset(boolean[].class), INT_ARRAY_OFFSET, "Misplaced boolean array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(byte[].class), INT_ARRAY_OFFSET, "Misplaced byte array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(char[].class), INT_ARRAY_OFFSET, "Misplaced char array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(short[].class), INT_ARRAY_OFFSET, "Misplaced short array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(int[].class), INT_ARRAY_OFFSET, "Misplaced int array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(long[].class), LONG_ARRAY_OFFSET, "Misplaced long array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(float[].class), INT_ARRAY_OFFSET, "Misplaced float array base"); + Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), LONG_ARRAY_OFFSET, "Misplaced double array base"); + boolean narrowOops = System.getProperty("java.vm.compressedOopsMode") != null || + !Platform.is64bit(); + int expected_objary_offset = narrowOops ? 
INT_ARRAY_OFFSET : LONG_ARRAY_OFFSET; + Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expected_objary_offset, "Misplaced object array base"); + } +} diff --git a/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java b/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java new file mode 100644 index 00000000000..590f22feed7 --- /dev/null +++ b/test/hotspot/jtreg/runtime/cds/TestDefaultArchiveLoading.java @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test id=nocoops_nocoh + * @summary Test Loading of default archives in all configurations + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading nocoops_nocoh + */ + +/** + * @test id=nocoops_coh + * @summary Test Loading of default archives in all configurations (requires --enable-cds-archive-coh) + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading nocoops_coh + */ + +/** + * @test id=coops_nocoh + * @summary Test Loading of default archives in all configurations + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading coops_nocoh + */ + +/** + * @test id=coops_coh + * @summary Test Loading of default archives in all configurations (requires --enable-cds-archive-coh) + * @requires vm.cds + * @requires vm.bits == 64 + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.management + * @run driver TestDefaultArchiveLoading coops_coh + */ + +import jdk.test.lib.Platform; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import jtreg.SkippedException; + +public class TestDefaultArchiveLoading { + public static void main(String[] args) throws Exception { + + if (args.length != 1) { + throw new RuntimeException("Expected argument"); + } + + String archiveSuffix; + char coh, coops; + + switch (args[0]) { + case "nocoops_nocoh": + coh = coops = '-'; + archiveSuffix = "_nocoops"; + break; + case "nocoops_coh": + coops = '-'; + coh = '+'; + archiveSuffix = "_nocoops_coh"; + break; + case "coops_nocoh": + coops = '+'; + coh = '-'; + archiveSuffix = ""; + break; + case "coops_coh": + coh = coops = '+'; + archiveSuffix = 
"_coh"; + break; + default: throw new RuntimeException("Invalid argument " + args[0]); + } + + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-XX:+UnlockExperimentalVMOptions", + "-XX:" + coh + "UseCompactObjectHeaders", + "-XX:" + coops + "UseCompressedOops", + "-Xlog:cds", + "-Xshare:on", // fail if we cannot load archive + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + output.shouldContain("classes" + archiveSuffix + ".jsa"); + + } +} diff --git a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java index 89fc346ffbb..e2e0f1bdf77 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/TestZGCWithCDS.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,12 +42,15 @@ public class TestZGCWithCDS { public final static String UNABLE_TO_USE_ARCHIVE = "Unable to use shared archive."; public final static String ERR_MSG = "The saved state of UseCompressedOops and UseCompressedClassPointers is different from runtime, CDS will be disabled."; public static void main(String... args) throws Exception { + String compactHeaders = "-XX:+UseCompactObjectHeaders"; String helloJar = JarBuilder.build("hello", "Hello"); System.out.println("0. Dump with ZGC"); OutputAnalyzer out = TestCommon .dump(helloJar, new String[] {"Hello"}, "-XX:+UseZGC", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds"); out.shouldContain("Dumping shared data to file:"); out.shouldHaveExitValue(0); @@ -56,6 +59,8 @@ public class TestZGCWithCDS { out = TestCommon .exec(helloJar, "-XX:+UseZGC", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); @@ -67,6 +72,8 @@ public class TestZGCWithCDS { "-XX:-UseZGC", "-XX:+UseCompressedOops", // in case turned off by vmoptions "-XX:+UseCompressedClassPointers", // by jtreg + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -79,6 +86,8 @@ public class TestZGCWithCDS { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -91,6 +100,8 @@ public class TestZGCWithCDS { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); @@ -102,6 +113,8 @@ public class TestZGCWithCDS { "-XX:+UseSerialGC", "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -114,6 +127,8 @@ public class TestZGCWithCDS { "-XX:+UseSerialGC", "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(UNABLE_TO_USE_ARCHIVE); @@ -127,6 +142,8 @@ public class TestZGCWithCDS { "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, 
"-Xlog:cds"); out.shouldContain("Dumping shared data to file:"); out.shouldHaveExitValue(0); @@ -135,6 +152,8 @@ public class TestZGCWithCDS { out = TestCommon .exec(helloJar, "-XX:+UseZGC", + "-XX:+UnlockExperimentalVMOptions", + compactHeaders, "-Xlog:cds", "Hello"); out.shouldContain(HELLO); diff --git a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java index 46e08c7b754..133f44521d5 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/TestAutoCreateSharedArchiveNoDefaultArchive.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,6 +144,7 @@ public class TestAutoCreateSharedArchiveNoDefaultArchive { private static void removeDefaultArchives(String java_home_dst, String variant) { removeDefaultArchive(java_home_dst, variant, ""); removeDefaultArchive(java_home_dst, variant, "_nocoops"); + removeDefaultArchive(java_home_dst, variant, "_coh"); } private static void removeDefaultArchive(String java_home_dst, String variant, String suffix) { diff --git a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java index bcf93498574..1f039eb73b5 100644 --- a/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java +++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,12 +93,12 @@ public class ClhsdbLongConstant { // ... // longConstant VM_Version::CPU_SHA 17179869184 // longConstant markWord::age_shift 3 - // longConstant markWord::hash_mask_in_place 549755813632 + // longConstant markWord::hash_mask_in_place 4398046509056 // ... checkLongValue("markWord::hash_mask_in_place", longConstantOutput, - Platform.is64bit() ? 549755813632L: 4294967168L); + Platform.is64bit() ? 4398046509056L: 4294967168L); String arch = System.getProperty("os.arch"); if (arch.equals("amd64") || arch.equals("i386") || arch.equals("x86")) { diff --git a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java index 22c5069f3e7..1e473ccd974 100644 --- a/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java +++ b/test/jdk/java/lang/instrument/GetObjectSizeIntrinsicsTest.java @@ -301,6 +301,7 @@ import jdk.test.whitebox.WhiteBox; public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase { + private static final boolean COMPACT_HEADERS = Platform.is64bit() && WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders"); static final Boolean COMPRESSED_OOPS = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompressedOops"); static final long REF_SIZE = (COMPRESSED_OOPS == null || COMPRESSED_OOPS == true) ? 
4 : 8; @@ -374,15 +375,25 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase return (v + a - 1) / a * a; } + private static long expectedSmallObjSize() { + long size; + if (!Platform.is64bit() || COMPACT_HEADERS) { + size = 8; + } else { + size = 16; + } + return roundUp(size, OBJ_ALIGN); + } + private void testSize_newObject() { - long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(new Object())); } } private void testSize_localObject() { - long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); Object o = new Object(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(o)); @@ -392,7 +403,7 @@ public class GetObjectSizeIntrinsicsTest extends ASimpleInstrumentationTestCase static Object staticO = new Object(); private void testSize_fieldObject() { - long expected = roundUp(Platform.is64bit() ? 16 : 8, OBJ_ALIGN); + long expected = expectedSmallObjSize(); for (int c = 0; c < ITERS; c++) { assertEquals(expected, fInst.getObjectSize(staticO)); } diff --git a/test/jdk/tools/jlink/plugins/CDSPluginTest.java b/test/jdk/tools/jlink/plugins/CDSPluginTest.java index d50b1c88a21..c9766183297 100644 --- a/test/jdk/tools/jlink/plugins/CDSPluginTest.java +++ b/test/jdk/tools/jlink/plugins/CDSPluginTest.java @@ -26,6 +26,7 @@ import java.io.File; import jdk.test.lib.JDKToolFinder; import jdk.test.lib.Platform; import jdk.test.lib.process.*; +import jdk.test.whitebox.WhiteBox; import tests.Helper; @@ -44,11 +45,12 @@ import jtreg.SkippedException; * jdk.jlink/jdk.tools.jimage * jdk.compiler * @build tests.* - * @run main CDSPluginTest + * @build jdk.test.whitebox.WhiteBox + * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox + * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. CDSPluginTest */ public class CDSPluginTest { - public static void main(String[] args) throws Throwable { if (!Platform.isDefaultCDSArchiveSupported()) @@ -75,12 +77,19 @@ public class CDSPluginTest { } subDir += "server" + sep; + WhiteBox wb = WhiteBox.getWhiteBox(); + boolean COMPACT_HEADERS = Platform.is64bit() && + wb.getBooleanVMFlag("UseCompactObjectHeaders") && + wb.isDefaultVMFlag("UseCompactObjectHeaders"); + + String suffix = COMPACT_HEADERS ? "_coh.jsa" : ".jsa"; + if (Platform.isAArch64() || Platform.isX64()) { helper.checkImage(image, module, null, null, - new String[] { subDir + "classes.jsa", subDir + "classes_nocoops.jsa" }); + new String[] { subDir + "classes" + suffix, subDir + "classes_nocoops" + suffix }); } else { helper.checkImage(image, module, null, null, - new String[] { subDir + "classes.jsa" }); + new String[] { subDir + "classes" + suffix }); } } }
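For reference, a self-contained sketch of the WhiteBox idiom that TestPLABPromotion, GetObjectSizeIntrinsicsTest and CDSPluginTest above share for detecting whether compact object headers are active; the class name CohProbe and its jtreg tags are illustrative only and not part of this change.

    import jdk.test.lib.Platform;
    import jdk.test.whitebox.WhiteBox;

    /*
     * Expects the usual WhiteBox setup, for example:
     *   @build jdk.test.whitebox.WhiteBox
     *   @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
     *   @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI CohProbe
     */
    public class CohProbe {
        public static void main(String[] args) {
            // getBooleanVMFlag returns a Boolean; the tests above additionally guard with
            // Platform.is64bit() because compact object headers are a 64-bit-only feature.
            Boolean flag = WhiteBox.getWhiteBox().getBooleanVMFlag("UseCompactObjectHeaders");
            boolean compactHeaders = Platform.is64bit() && flag != null && flag;
            System.out.println("UseCompactObjectHeaders in effect: " + compactHeaders);
        }
    }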