8233078: fix minimal VM build on Linux ppc64(le)

Reviewed-by: mdoerr, lucy
Matthias Baesken 2019-11-04 09:54:00 +01:00
parent a9952bb5d9
commit da61b865b4
6 changed files with 16 additions and 20 deletions
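The minimal VM variant is built with most optional JVM features disabled and, in particular, without the C2 (server) JIT compiler. The ppc64/ppc64le port referenced C2-only classes, flags and stubs unconditionally, which broke that build. The hunks below use two strategies: compile C2-only code conditionally, or replace the C2-only dependency with something available in every variant. Illustrative sketch of both, using names that appear in the hunks below:

  #ifdef COMPILER2
    // code that needs C2 classes or C2-only flags is compiled only when C2 is present
  #endif
  Register ic = R19_inline_cache_reg;  // fixed ppc register, instead of C2's Matcher::inline_cache_reg_encode()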

View File

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,8 +48,12 @@ define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(intx, FreqInlineSize, 325 );
 define_pd_global(bool, ResizeTLAB, true);
 define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
 define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 define_pd_global(size_t, MetaspaceSize, 12*M);
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
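The four new define_pd_global entries supply platform defaults for the segmented code cache; in a build without C2 these were presumably missing, because the C2 globals file that otherwise provides them is not compiled. Note that the heap sizes add up to the 32*M ReservedCodeCacheSize defined just above (13 + 14 + 5 = 32). Minimal sketch of how such a default is consumed, assuming the usual HotSpot flag plumbing:

  define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
  // shared code declares the flag as product_pd(uintx, NonNMethodCodeHeapSize, ...),
  // which picks up this per-platform default; VM code then simply reads NonNMethodCodeHeapSize.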

View File

@@ -27,8 +27,6 @@
 #include "code/codeCache.hpp"
 #include "compiler/disassembler.hpp"
 #include "depChecker_ppc.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/parOopClosures.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"

View File

@@ -50,6 +50,8 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 #if defined(COMPILER2) && (defined(AIX) || defined(LINUX))
 // Include Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
+#else
+#define INCLUDE_RTM_OPT 0
 #endif
 #define SUPPORT_RESERVED_STACK_AREA
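INCLUDE_RTM_OPT was previously defined (to 1) only for COMPILER2 builds on AIX and Linux; it is now explicitly defined to 0 in all other configurations, so preprocessor tests see a defined macro in every variant rather than relying on an undefined name evaluating to 0. Consumer-side pattern this supports (illustrative):

  #if INCLUDE_RTM_OPT
    // emit RTM (hardware transactional memory) lock-eliding code
  #endif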

View File

@@ -571,7 +571,6 @@ void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination
   __ bctr();
 }
-#ifdef COMPILER2
 static int reg2slot(VMReg r) {
   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 }
@@ -579,7 +578,6 @@ static int reg2slot(VMReg r) {
 static int reg2offset(VMReg r) {
   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 }
-#endif
 // ---------------------------------------------------------------------------
 // Read the array of BasicTypes from a signature, and compute where the
@@ -1305,7 +1303,6 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
-#ifdef COMPILER2
 // An oop arg. Must pass a handle not the oop itself.
 static void object_move(MacroAssembler* masm,
                         int frame_size_in_slots,
@@ -1813,8 +1810,6 @@ static void gen_special_dispatch(MacroAssembler* masm,
                            receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }
-#endif // COMPILER2
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method. The method takes arguments
 // in the Java compiled code convention, marshals them to the native
@@ -1851,7 +1846,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type,
                                                 address critical_entry) {
-#ifdef COMPILER2
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
     intptr_t start = (intptr_t)__ pc();
@@ -2108,7 +2102,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Check ic: object class == cached class?
   if (!method_is_static) {
-    Register ic = as_Register(Matcher::inline_cache_reg_encode());
+    Register ic = R19_inline_cache_reg;
     Register receiver_klass = r_temp_1;
     __ cmpdi(CCR0, R3_ARG1, 0);
@@ -2638,12 +2632,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // Handler for pending exceptions (out-of-line).
   // --------------------------------------------------------------------------
   // Since this is a native call, we know the proper exception handler
   // is the empty function. We just pop this frame and then jump to
   // forward_exception_entry.
   if (!is_critical_native) {
-    __ align(InteriorEntryAlignment);
     __ bind(handle_pending_exception);
     __ pop_frame();
@@ -2656,7 +2648,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
   if (!method_is_static) {
-    __ align(InteriorEntryAlignment);
     __ bind(ic_miss);
     __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
@@ -2683,10 +2674,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   }
   return nm;
-#else
-  ShouldNotReachHere();
-  return NULL;
-#endif // COMPILER2
 }
 // This function returns the adjust size (in number of words) to a c2i adapter
@@ -2863,7 +2850,7 @@ void SharedRuntime::generate_deopt_blob() {
   // We can't grab a free register here, because all registers may
   // contain live values, so let the RegisterSaver do the adjustment
   // of the return pc.
-  const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
+  const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
   // Push the "unpack frame"
   // Save everything in sight.
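The COMPILER2 guards around reg2slot()/reg2offset(), the argument-moving helpers, and the body of generate_native_wrapper() are removed, so the native wrapper is generated in every variant instead of falling into the old #else branch (ShouldNotReachHere(); return NULL;). Two C2-only dependencies are replaced by equivalents that exist without C2: the inline-cache register now comes directly from the ppc register definitions (R19_inline_cache_reg) rather than from C2's Matcher, and the deopt return-pc adjustment uses MacroAssembler::bl64_patchable_size rather than HandlerImpl::size_deopt_handler(), which lives in the C2 ad file. The two __ align(InteriorEntryAlignment) calls go away because InteriorEntryAlignment is a C2-only flag. Sketch of the replacement, on the assumption that the ppc deopt handler is a patchable 64-bit branch-and-link so the two expressions yield the same value:

  const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
  // was: -HandlerImpl::size_deopt_handler(), which is only available when C2 is built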

View File

@@ -3103,6 +3103,7 @@ class StubGenerator: public StubCodeGenerator {
                                                        STUB_ENTRY(checkcast_arraycopy));
     // fill routines
+#ifdef COMPILER2
     if (OptimizeFill) {
       StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
       StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
@@ -3111,6 +3112,7 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
       StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
     }
+#endif
   }
   // Safefetch stubs.
@@ -3579,8 +3581,6 @@ class StubGenerator: public StubCodeGenerator {
     if (UseMultiplyToLenIntrinsic) {
       StubRoutines::_multiplyToLen = generate_multiplyToLen();
     }
-#endif
     if (UseSquareToLenIntrinsic) {
       StubRoutines::_squareToLen = generate_squareToLen();
     }
@@ -3595,6 +3595,7 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::_montgomerySquare
         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
     }
+#endif
     if (UseAESIntrinsics) {
       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
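Here the #ifdef COMPILER2 region is adjusted rather than removed: the fill stubs gain a guard (OptimizeFill is a C2 flag), and the existing guard around the big-integer stubs is extended so that squareToLen and the Montgomery multiply/square stubs are also generated only when C2 is present, matching their Use*Intrinsic flags. After both hunks the guarded region reads, in outline (sketch):

  #ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    // ... Montgomery multiply/square stubs ...
  #endif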

View File

@@ -312,6 +312,7 @@ void VM_Version::initialize() {
     FLAG_SET_DEFAULT(UseSHA, false);
   }
+#ifdef COMPILER2
   if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
     UseSquareToLenIntrinsic = true;
   }
@@ -327,6 +328,7 @@ void VM_Version::initialize() {
   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
     UseMontgomerySquareIntrinsic = true;
   }
+#endif
   if (UseVectorizedMismatchIntrinsic) {
     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
@@ -373,9 +375,11 @@ void VM_Version::initialize() {
   if (UseRTMDeopt) {
     FLAG_SET_DEFAULT(UseRTMDeopt, false);
   }
+#ifdef COMPILER2
   if (PrintPreciseRTMLockingStatistics) {
     FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
   }
+#endif
 }
 // This machine allows unaligned memory accesses
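The flags touched here (UseSquareToLenIntrinsic, UseMontgomerySquareIntrinsic and, in the last hunk, PrintPreciseRTMLockingStatistics) appear to be C2-only flags, i.e. they are not declared in a build without C2, so the code that sets their defaults is now compiled conditionally. Resulting shape (sketch, assuming the surrounding VM_Version::initialize()):

  #ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      UseSquareToLenIntrinsic = true;
    }
    // ... further C2-only intrinsic and RTM flag defaults ...
  #endif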