8274527: Minimal VM build fails after JDK-8273459

Reviewed-by: kvn
This commit is contained in:
Jie Fu 2021-09-30 23:11:56 +00:00
parent 7326481143
commit a8edd1b360
4 changed files with 22 additions and 17 deletions

View File

@@ -1177,6 +1177,10 @@ void MacroAssembler::align64() {
align(64, (unsigned long long) pc());
}
void MacroAssembler::align32() {
align(32, (unsigned long long) pc());
}
void MacroAssembler::align(int modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
@@ -6905,7 +6909,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
// 128 bits per each of 4 parallel streams.
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
align(32);
align32();
BIND(L_fold_512b_loop);
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);

View File

@@ -194,6 +194,7 @@ class MacroAssembler: public Assembler {
void incrementq(AddressLiteral dst);
// Alignment
void align32();
void align64();
void align(int modulus);
void align(int modulus, int target);

View File

@@ -80,7 +80,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
cmpptr(data, end);
jcc(Assembler::aboveEqual, SKIP_LOOP_1A);
align(32);
align32();
bind(SLOOP1A);
vbroadcastf128(ydata, Address(data, 0), Assembler::AVX_256bit);
addptr(data, CHUNKSIZE);
@@ -178,7 +178,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
movdl(rax, xb);
addl(b_d, rax);
align(32);
align32();
bind(FINAL_LOOP);
movzbl(rax, Address(data, 0)); //movzx eax, byte[data]
addl(a_d, rax);

View File

@@ -1484,7 +1484,7 @@ class StubGenerator: public StubCodeGenerator {
__ subq(temp1, loop_size[shift]);
// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64);
@@ -1551,7 +1551,7 @@ class StubGenerator: public StubCodeGenerator {
// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0 , true);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64, true);
@@ -1691,7 +1691,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_main_pre_loop);
// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128);
@@ -1724,7 +1724,7 @@ class StubGenerator: public StubCodeGenerator {
// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
__ align(32);
__ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64 , true);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128, true);
@@ -4274,7 +4274,7 @@ class StubGenerator: public StubCodeGenerator {
//Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512() {
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
address start = __ pc();
if (VM_Version::supports_avx2()) {
@@ -5401,7 +5401,7 @@ address generate_avx_ghash_processBlocks() {
address base64_avx2_shuffle_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_shuffle_base64");
address start = __ pc();
__ emit_data64(0x0809070805060405, relocInfo::none);
@@ -5413,7 +5413,7 @@ address generate_avx_ghash_processBlocks() {
address base64_avx2_input_mask_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_input_mask_base64");
address start = __ pc();
__ emit_data64(0x8000000000000000, relocInfo::none);
@@ -5425,7 +5425,7 @@ address generate_avx_ghash_processBlocks() {
address base64_avx2_lut_addr()
{
__ align(32);
__ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_lut_base64");
address start = __ pc();
__ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
@@ -5530,7 +5530,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(xmm2, Address(encode_table, 0), Assembler::AVX_512bit);
__ evpbroadcastq(xmm1, rax, Assembler::AVX_512bit);
__ align(32);
__ align32();
__ BIND(L_vbmiLoop);
__ vpermb(xmm0, xmm3, Address(source, start_offset), Assembler::AVX_512bit);
@@ -5730,7 +5730,7 @@ address generate_avx_ghash_processBlocks() {
__ cmpl(length, 31);
__ jcc(Assembler::belowEqual, L_process3);
__ align(32);
__ align32();
__ BIND(L_32byteLoop);
// Get next 32 bytes
@@ -6177,7 +6177,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(join12, ExternalAddress(StubRoutines::x86::base64_vbmi_join_1_2_addr()), Assembler::AVX_512bit, r13);
__ evmovdquq(join23, ExternalAddress(StubRoutines::x86::base64_vbmi_join_2_3_addr()), Assembler::AVX_512bit, r13);
__ align(32);
__ align32();
__ BIND(L_process256);
// Grab input data
__ evmovdquq(input0, Address(source, start_offset, Address::times_1, 0x00), Assembler::AVX_512bit);
@@ -6259,7 +6259,7 @@ address generate_avx_ghash_processBlocks() {
__ cmpl(length, 63);
__ jcc(Assembler::lessEqual, L_finalBit);
__ align(32);
__ align32();
__ BIND(L_process64Loop);
// Handle first 64-byte block
@@ -6395,7 +6395,7 @@ address generate_avx_ghash_processBlocks() {
__ shrq(rax, 1);
__ jmp(L_donePadding);
__ align(32);
__ align32();
__ BIND(L_bruteForce);
} // End of if(avx512_vbmi)
@@ -6439,7 +6439,7 @@ address generate_avx_ghash_processBlocks() {
__ jmp(L_bottomLoop);
__ align(32);
__ align32();
__ BIND(L_forceLoop);
__ shll(byte1, 18);
__ shll(byte2, 12);