// BEGIN  Generated code -- do not edit
// Generated by aarch64-asmtest.py
  Label back, forth;
  __ bind(back);

// ArithOp
  __ add(r26, r23, r13, Assembler::LSL, 32);   // add x26, x23, x13, LSL #32
  __ sub(r12, r24, r9, Assembler::LSR, 37);    // sub x12, x24, x9, LSR #37
  __ adds(r28, r15, r8, Assembler::ASR, 39);   // adds x28, x15, x8, ASR #39
  __ subs(r7, r28, r30, Assembler::ASR, 57);   // subs x7, x28, x30, ASR #57
  __ addw(r9, r22, r27, Assembler::ASR, 15);   // add w9, w22, w27, ASR #15
  __ subw(r3, r13, r17, Assembler::ASR, 30);   // sub w3, w13, w17, ASR #30
  __ addsw(r14, r26, r8, Assembler::ASR, 17);  // adds w14, w26, w8, ASR #17
  __ subsw(r0, r22, r12, Assembler::ASR, 21);  // subs w0, w22, w12, ASR #21
  __ andr(r0, r15, r26, Assembler::LSL, 20);   // and x0, x15, x26, LSL #20
  __ orr(r26, r5, r17, Assembler::LSL, 61);    // orr x26, x5, x17, LSL #61
  __ eor(r24, r13, r2, Assembler::LSL, 32);    // eor x24, x13, x2, LSL #32
  __ ands(r28, r3, r17, Assembler::ASR, 35);   // ands x28, x3, x17, ASR #35
  __ andw(r25, r16, r29, Assembler::LSR, 18);  // and w25, w16, w29, LSR #18
  __ orrw(r13, r17, r11, Assembler::LSR, 9);   // orr w13, w17, w11, LSR #9
  __ eorw(r5, r5, r17, Assembler::LSR, 15);    // eor w5, w5, w17, LSR #15
  __ andsw(r2, r23, r27, Assembler::ASR, 26);  // ands w2, w23, w27, ASR #26
  __ bic(r27, r28, r16, Assembler::LSR, 45);   // bic x27, x28, x16, LSR #45
  __ orn(r8, r25, r26, Assembler::ASR, 37);    // orn x8, x25, x26, ASR #37
  __ eon(r29, r17, r13, Assembler::LSR, 63);   // eon x29, x17, x13, LSR #63
  __ bics(r28, r24, r2, Assembler::LSR, 31);   // bics x28, x24, x2, LSR #31
  __ bicw(r19, r26, r7, Assembler::ASR, 3);    // bic w19, w26, w7, ASR #3
  __ ornw(r6, r24, r10, Assembler::ASR, 3);    // orn w6, w24, w10, ASR #3
  __ eonw(r4, r21, r1, Assembler::LSR, 29);    // eon w4, w21, w1, LSR #29
  __ bicsw(r16, r21, r0, Assembler::LSR, 19);  // bics w16, w21, w0, LSR #19

// AddSubImmOp
  __ addw(r17, r12, 379u);   // add w17, w12, #379
  __ addsw(r30, r1, 22u);    // adds w30, w1, #22
  __ subw(r29, r5, 126u);    // sub w29, w5, #126
  __ subsw(r6, r24, 960u);   // subs w6, w24, #960
  __ add(r0, r13, 104u);     // add x0, x13, #104
  __ adds(r8, r6, 663u);     // adds x8, x6, #663
  __ sub(r10, r5, 516u);     // sub x10, x5, #516
  __ subs(r1, r3, 1012u);    // subs x1, x3, #1012

// LogicalImmOp
  __ andw(r6, r11, 4294049777ull);             // and w6, w11, #0xfff1fff1
  __ orrw(r28, r5, 4294966791ull);             // orr w28, w5, #0xfffffe07
  __ eorw(r1, r20, 134217216ull);              // eor w1, w20, #0x7fffe00
  __ andsw(r7, r17, 1048576ull);               // ands w7, w17, #0x100000
  __ andr(r14, r12, 9223372036854775808ull);   // and x14, x12, #0x8000000000000000
  __ orr(r9, r11, 562675075514368ull);         // orr x9, x11, #0x1ffc000000000
  __ eor(r17, r0, 18014398509481728ull);       // eor x17, x0, #0x3fffffffffff00
  __ ands(r1, r8, 18446744073705357315ull);    // ands x1, x8, #0xffffffffffc00003

// AbsOp
  __ b(__ pc());   // b .
  __ b(back);      // b back
  __ b(forth);     // b forth
  __ bl(__ pc());  // bl .
  __ bl(back);     // bl back
  __ bl(forth);    // bl forth

// RegAndAbsOp
  __ cbzw(r10, __ pc());   // cbz w10, .
  __ cbzw(r10, back);      // cbz w10, back
  __ cbzw(r10, forth);     // cbz w10, forth
  __ cbnzw(r8, __ pc());   // cbnz w8, .
  __ cbnzw(r8, back);      // cbnz w8, back
  __ cbnzw(r8, forth);     // cbnz w8, forth
  __ cbz(r11, __ pc());    // cbz x11, .
  __ cbz(r11, back);       // cbz x11, back
  __ cbz(r11, forth);      // cbz x11, forth
  __ cbnz(r29, __ pc());   // cbnz x29, .
  __ cbnz(r29, back);      // cbnz x29, back
  __ cbnz(r29, forth);     // cbnz x29, forth
  __ adr(r19, __ pc());    // adr x19, .
  __ adr(r19, back);       // adr x19, back
  __ adr(r19, forth);      // adr x19, forth
  __ _adrp(r19, __ pc());  // adrp x19, .
// RegImmAbsOp
  __ tbz(r22, 6, __ pc());    // tbz x22, #6, .
  __ tbz(r22, 6, back);       // tbz x22, #6, back
  __ tbz(r22, 6, forth);      // tbz x22, #6, forth
  __ tbnz(r12, 11, __ pc());  // tbnz x12, #11, .
  __ tbnz(r12, 11, back);     // tbnz x12, #11, back
  __ tbnz(r12, 11, forth);    // tbnz x12, #11, forth

// MoveWideImmOp
  __ movnw(r0, 6301, 0);    // movn w0, #6301, lsl 0
  __ movzw(r7, 20886, 0);   // movz w7, #20886, lsl 0
  __ movkw(r27, 18617, 0);  // movk w27, #18617, lsl 0
  __ movn(r12, 22998, 16);  // movn x12, #22998, lsl 16
  __ movz(r20, 1532, 16);   // movz x20, #1532, lsl 16
  __ movk(r8, 5167, 32);    // movk x8, #5167, lsl 32

// BitfieldOp
  __ sbfm(r15, r17, 24, 28);  // sbfm x15, x17, #24, #28
  __ bfmw(r15, r9, 14, 25);   // bfm w15, w9, #14, #25
  __ ubfmw(r27, r25, 6, 31);  // ubfm w27, w25, #6, #31
  __ sbfm(r19, r2, 23, 31);   // sbfm x19, x2, #23, #31
  __ bfm(r12, r21, 10, 6);    // bfm x12, x21, #10, #6
  __ ubfm(r22, r0, 26, 16);   // ubfm x22, x0, #26, #16

// ExtractOp
  __ extrw(r3, r3, r20, 27);  // extr w3, w3, w20, #27
  __ extr(r8, r30, r3, 54);   // extr x8, x30, x3, #54

// CondBranchOp
  __ br(Assembler::EQ, __ pc());  // b.EQ .
  __ br(Assembler::EQ, back);     // b.EQ back
  __ br(Assembler::EQ, forth);    // b.EQ forth
  __ br(Assembler::NE, __ pc());  // b.NE .
  __ br(Assembler::NE, back);     // b.NE back
  __ br(Assembler::NE, forth);    // b.NE forth
  __ br(Assembler::HS, __ pc());  // b.HS .
  __ br(Assembler::HS, back);     // b.HS back
  __ br(Assembler::HS, forth);    // b.HS forth
  __ br(Assembler::CS, __ pc());  // b.CS .
  __ br(Assembler::CS, back);     // b.CS back
  __ br(Assembler::CS, forth);    // b.CS forth
  __ br(Assembler::LO, __ pc());  // b.LO .
  __ br(Assembler::LO, back);     // b.LO back
  __ br(Assembler::LO, forth);    // b.LO forth
  __ br(Assembler::CC, __ pc());  // b.CC .
  __ br(Assembler::CC, back);     // b.CC back
  __ br(Assembler::CC, forth);    // b.CC forth
  __ br(Assembler::MI, __ pc());  // b.MI .
  __ br(Assembler::MI, back);     // b.MI back
  __ br(Assembler::MI, forth);    // b.MI forth
  __ br(Assembler::PL, __ pc());  // b.PL .
  __ br(Assembler::PL, back);     // b.PL back
  __ br(Assembler::PL, forth);    // b.PL forth
  __ br(Assembler::VS, __ pc());  // b.VS .
  __ br(Assembler::VS, back);     // b.VS back
  __ br(Assembler::VS, forth);    // b.VS forth
  __ br(Assembler::VC, __ pc());  // b.VC .
  __ br(Assembler::VC, back);     // b.VC back
  __ br(Assembler::VC, forth);    // b.VC forth
  __ br(Assembler::HI, __ pc());  // b.HI .
  __ br(Assembler::HI, back);     // b.HI back
  __ br(Assembler::HI, forth);    // b.HI forth
  __ br(Assembler::LS, __ pc());  // b.LS .
  __ br(Assembler::LS, back);     // b.LS back
  __ br(Assembler::LS, forth);    // b.LS forth
  __ br(Assembler::GE, __ pc());  // b.GE .
  __ br(Assembler::GE, back);     // b.GE back
  __ br(Assembler::GE, forth);    // b.GE forth
  __ br(Assembler::LT, __ pc());  // b.LT .
  __ br(Assembler::LT, back);     // b.LT back
  __ br(Assembler::LT, forth);    // b.LT forth
  __ br(Assembler::GT, __ pc());  // b.GT .
  __ br(Assembler::GT, back);     // b.GT back
  __ br(Assembler::GT, forth);    // b.GT forth
  __ br(Assembler::LE, __ pc());  // b.LE .
  __ br(Assembler::LE, back);     // b.LE back
  __ br(Assembler::LE, forth);    // b.LE forth
  __ br(Assembler::AL, __ pc());  // b.AL .
  __ br(Assembler::AL, back);     // b.AL back
  __ br(Assembler::AL, forth);    // b.AL forth
  __ br(Assembler::NV, __ pc());  // b.NV .
  __ br(Assembler::NV, back);     // b.NV back
  __ br(Assembler::NV, forth);    // b.NV forth

// ImmOp
  __ svc(12999);  // svc #12999
  __ hvc(2665);   // hvc #2665
  __ smc(9002);   // smc #9002
  __ brk(14843);  // brk #14843
  __ hlt(25964);  // hlt #25964

// Op
  __ nop();         // nop
  __ yield();       // yield
  __ wfe();         // wfe
  __ sev();         // sev
  __ sevl();        // sevl
  __ autia1716();   // autia1716
  __ autiasp();     // autiasp
  __ autiaz();      // autiaz
  __ autib1716();   // autib1716
  __ autibsp();     // autibsp
  __ autibz();      // autibz
  __ pacia1716();   // pacia1716
  __ paciasp();     // paciasp
  __ paciaz();      // paciaz
  __ pacib1716();   // pacib1716
  __ pacibsp();     // pacibsp
  __ pacibz();      // pacibz
  __ eret();        // eret
  __ drps();        // drps
  __ isb();         // isb

// PostfixExceptionOp
  __ wfi();      // wfi
  __ xpaclri();  // xpaclri

// SystemOp
  __ dsb(Assembler::ST);     // dsb ST
  __ dmb(Assembler::OSHST);  // dmb OSHST

// OneRegOp
  __ br(r16);      // br x16
  __ blr(r20);     // blr x20
  __ paciza(r10);  // paciza x10
  __ pacizb(r27);  // pacizb x27
  __ pacdza(r8);   // pacdza x8
  __ pacdzb(r0);   // pacdzb x0
  __ autiza(r1);   // autiza x1
  __ autizb(r21);  // autizb x21
  __ autdza(r17);  // autdza x17
  __ autdzb(r29);  // autdzb x29
  __ xpacd(r29);   // xpacd x29
  __ braaz(r28);   // braaz x28
  __ brabz(r1);    // brabz x1
  __ blraaz(r23);  // blraaz x23
  __ blrabz(r21);  // blrabz x21

// SystemOneRegOp
  __ msr(3, 4, 4, 1, r20);  // msr fpsr, x20

// SystemOneRegOp
  __ msr(3, 4, 2, 0, r22);  // msr nzcv, x22

// OneRegSystemOp
  __ mrs(3, 4, 4, 1, r27);  // mrs x27, fpsr

// OneRegSystemOp
  __ mrs(3, 4, 2, 0, r19);  // mrs x19, nzcv

// OneRegSystemOp
  __ mrs(3, 0, 0, 7, r11);  // mrs x11, dczid_el0

// OneRegSystemOp
  __ mrs(3, 0, 0, 1, r16);  // mrs x16, ctr_el0

// PostfixExceptionOneRegOp
  __ xpaci(r6);  // xpaci x6

// LoadStoreExclusiveOp
  __ stxr(r17, r0, r4);    // stxr w17, x0, [x4]
  __ stlxr(r10, r24, r22); // stlxr w10, x24, [x22]
  __ ldxr(r10, r19);       // ldxr x10, [x19]
  __ ldaxr(r1, r5);        // ldaxr x1, [x5]
  __ stlr(r30, r8);        // stlr x30, [x8]
  __ ldar(r12, r17);       // ldar x12, [x17]

// LoadStoreExclusiveOp
  __ stxrw(r9, r14, r7);   // stxr w9, w14, [x7]
  __ stlxrw(r1, r5, r16);  // stlxr w1, w5, [x16]
  __ ldxrw(r2, r12);       // ldxr w2, [x12]
  __ ldaxrw(r10, r12);     // ldaxr w10, [x12]
  __ stlrw(r3, r28);       // stlr w3, [x28]
  __ ldarw(r14, r26);      // ldar w14, [x26]

// LoadStoreExclusiveOp
  __ stxrh(r30, r10, r14); // stxrh w30, w10, [x14]
  __ stlxrh(r21, r13, r9); // stlxrh w21, w13, [x9]
  __ ldxrh(r22, r27);      // ldxrh w22, [x27]
  __ ldaxrh(r28, r19);     // ldaxrh w28, [x19]
  __ stlrh(r11, r30);      // stlrh w11, [x30]
  __ ldarh(r19, r2);       // ldarh w19, [x2]

// LoadStoreExclusiveOp
  __ stxrb(r2, r23, r1);   // stxrb w2, w23, [x1]
  __ stlxrb(r0, r12, r16); // stlxrb w0, w12, [x16]
  __ ldxrb(r13, r15);      // ldxrb w13, [x15]
  __ ldaxrb(r17, r21);     // ldaxrb w17, [x21]
  __ stlrb(r13, r11);      // stlrb w13, [x11]
  __ ldarb(r30, r8);       // ldarb w30, [x8]

// LoadStoreExclusiveOp
  __ ldxp(r24, r13, r11);       // ldxp x24, x13, [x11]
  __ ldaxp(r1, r26, r21);       // ldaxp x1, x26, [x21]
  __ stxp(r27, r13, r20, r3);   // stxp w27, x13, x20, [x3]
  __ stlxp(r12, r6, r1, r29);   // stlxp w12, x6, x1, [x29]

// LoadStoreExclusiveOp
  __ ldxpw(r6, r4, r11);        // ldxp w6, w4, [x11]
  __ ldaxpw(r16, r4, r30);      // ldaxp w16, w4, [x30]
  __ stxpw(r30, r4, r12, r21);  // stxp w30, w4, w12, [x21]
  __ stlxpw(r27, r15, r28, r9); // stlxp w27, w15, w28, [x9]

// base_plus_unscaled_offset
// LoadStoreOp
  __ str(r25, Address(r15, 1));     // str x25, [x15, 1]
  __ strw(r2, Address(r1, -79));    // str w2, [x1, -79]
  __ strb(r20, Address(r26, -22));  // strb w20, [x26, -22]
  __ strh(r23, Address(r30, 22));   // strh w23, [x30, 22]
  __ ldr(r26, Address(r28, -49));   // ldr x26, [x28, -49]
  __ ldrw(r9, Address(r24, -128));  // ldr w9, [x24, -128]
  __ ldrb(r12, Address(r12, -30));  // ldrb w12, [x12, -30]
  __ ldrh(r1, Address(r15, 5));     // ldrh w1, [x15, 5]
  __ ldrsb(r24, Address(r14, -31)); // ldrsb x24, [x14, -31]
  __ ldrsh(r24, Address(r15, -6));  // ldrsh x24, [x15, -6]
  __ ldrshw(r5, Address(r3, 12));   // ldrsh w5, [x3, 12]
  __ ldrsw(r27, Address(r24, 17));  // ldrsw x27, [x24, 17]
  __ ldrd(v13, Address(r29, -35));  // ldr d13, [x29, -35]
  __ ldrs(v22, Address(r9, -47));   // ldr s22, [x9, -47]
  __ strd(v11, Address(r0, 9));     // str d11, [x0, 9]
  __ strs(v20, Address(r0, -127));  // str s20, [x0, -127]

// pre
// LoadStoreOp
  __ str(r29, Address(__ pre(r3, -114)));    // str x29, [x3, -114]!
  __ strw(r17, Address(__ pre(r4, -72)));    // str w17, [x4, -72]!
  __ strb(r0, Address(__ pre(r2, -17)));     // strb w0, [x2, -17]!
  __ strh(r29, Address(__ pre(r1, 7)));      // strh w29, [x1, 7]!
  __ ldr(r16, Address(__ pre(r21, -133)));   // ldr x16, [x21, -133]!
  __ ldrw(r20, Address(__ pre(r14, 19)));    // ldr w20, [x14, 19]!
  __ ldrb(r22, Address(__ pre(r14, -3)));    // ldrb w22, [x14, -3]!
  __ ldrh(r15, Address(__ pre(r17, 9)));     // ldrh w15, [x17, 9]!
  __ ldrsb(r10, Address(__ pre(r15, -19)));  // ldrsb x10, [x15, -19]!
  __ ldrsh(r20, Address(__ pre(r12, -25)));  // ldrsh x20, [x12, -25]!
  __ ldrshw(r21, Address(__ pre(r10, -29))); // ldrsh w21, [x10, -29]!
  __ ldrsw(r19, Address(__ pre(r0, 5)));     // ldrsw x19, [x0, 5]!
  __ ldrd(v0, Address(__ pre(r14, -54)));    // ldr d0, [x14, -54]!
  __ ldrs(v3, Address(__ pre(r1, 40)));      // ldr s3, [x1, 40]!
  __ strd(v4, Address(__ pre(r14, -94)));    // str d4, [x14, -94]!
  __ strs(v17, Address(__ pre(r28, -54)));   // str s17, [x28, -54]!

// post
// LoadStoreOp
  __ str(r22, Address(__ post(r15, -185)));  // str x22, [x15], -185
  __ strw(r17, Address(__ post(r14, -7)));   // str w17, [x14], -7
  __ strb(r30, Address(__ post(r11, -25)));  // strb w30, [x11], -25
  __ strh(r1, Address(__ post(r11, 20)));    // strh w1, [x11], 20
  __ ldr(r22, Address(__ post(r1, 2)));      // ldr x22, [x1], 2
  __ ldrw(r2, Address(__ post(r23, -119)));  // ldr w2, [x23], -119
  __ ldrb(r3, Address(__ post(r27, -12)));   // ldrb w3, [x27], -12
  __ ldrh(r16, Address(__ post(r7, -37)));   // ldrh w16, [x7], -37
  __ ldrsb(r15, Address(__ post(r26, 3)));   // ldrsb x15, [x26], 3
  __ ldrsh(r7, Address(__ post(r15, -30)));  // ldrsh x7, [x15], -30
  __ ldrshw(r3, Address(__ post(r11, -48))); // ldrsh w3, [x11], -48
  __ ldrsw(r25, Address(__ post(r23, 22)));  // ldrsw x25, [x23], 22
  __ ldrd(v0, Address(__ post(r10, -215)));  // ldr d0, [x10], -215
  __ ldrs(v17, Address(__ post(r6, 55)));    // ldr s17, [x6], 55
  __ strd(v13, Address(__ post(r21, -234))); // str d13, [x21], -234
  __ strs(v0, Address(__ post(r22, -70)));   // str s0, [x22], -70

// base_plus_reg
// LoadStoreOp
  __ str(r27, Address(r19, r0, Address::sxtx(0)));    // str x27, [x19, x0, sxtx #0]
  __ strw(r8, Address(r6, r13, Address::lsl(0)));     // str w8, [x6, x13, lsl #0]
  __ strb(r4, Address(r16, r22, Address::lsl(0)));    // strb w4, [x16, x22, lsl #0]
  __ strh(r25, Address(r26, r15, Address::uxtw(0)));  // strh w25, [x26, w15, uxtw #0]
  __ ldr(r4, Address(r5, r24, Address::sxtw(0)));     // ldr x4, [x5, w24, sxtw #0]
  __ ldrw(r4, Address(r17, r7, Address::uxtw(0)));    // ldr w4, [x17, w7, uxtw #0]
  __ ldrb(r17, Address(r7, r11, Address::lsl(0)));    // ldrb w17, [x7, x11, lsl #0]
  __ ldrh(r0, Address(r30, r23, Address::lsl(0)));    // ldrh w0, [x30, x23, lsl #0]
  __ ldrsb(r10, Address(r22, r1, Address::uxtw(0)));  // ldrsb x10, [x22, w1, uxtw #0]
  __ ldrsh(r21, Address(r30, r30, Address::sxtw(1))); // ldrsh x21, [x30, w30, sxtw #1]
  __ ldrshw(r11, Address(r10, r28, Address::sxtw(1))); // ldrsh w11, [x10, w28, sxtw #1]
  __ ldrsw(r28, Address(r19, r10, Address::uxtw(0))); // ldrsw x28, [x19, w10, uxtw #0]
  __ ldrd(v29, Address(r29, r14, Address::sxtw(0)));  // ldr d29, [x29, w14, sxtw #0]
  __ ldrs(v8, Address(r5, r5, Address::sxtw(2)));     // ldr s8, [x5, w5, sxtw #2]
  __ strd(v24, Address(r8, r13, Address::sxtx(0)));   // str d24, [x8, x13, sxtx #0]
  __ strs(v17, Address(r24, r26, Address::lsl(2)));   // str s17, [x24, x26, lsl #2]

// base_plus_scaled_offset
// LoadStoreOp
  __ str(r19, Address(r12, 15904));   // str x19, [x12, 15904]
  __ strw(r23, Address(r15, 7892));   // str w23, [x15, 7892]
  __ strb(r29, Address(r13, 1970));   // strb w29, [x13, 1970]
  __ strh(r11, Address(r7, 3094));    // strh w11, [x7, 3094]
  __ ldr(r10, Address(r24, 14992));   // ldr x10, [x24, 14992]
  __ ldrw(r16, Address(r0, 6160));    // ldr w16, [x0, 6160]
  __ ldrb(r20, Address(r1, 2032));    // ldrb w20, [x1, 2032]
  __ ldrh(r1, Address(r17, 4056));    // ldrh w1, [x17, 4056]
  __ ldrsb(r17, Address(r25, 1889));  // ldrsb x17, [x25, 1889]
  __ ldrsh(r27, Address(r25, 3964));  // ldrsh x27, [x25, 3964]
  __ ldrshw(r14, Address(r17, 3724)); // ldrsh w14, [x17, 3724]
  __ ldrsw(r10, Address(r7, 6372));   // ldrsw x10, [x7, 6372]
  __ ldrd(v3, Address(r25, 12392));   // ldr d3, [x25, 12392]
  __ ldrs(v12, Address(r9, 7840));    // ldr s12, [x9, 7840]
  __ strd(v23, Address(r1, 12728));   // str d23, [x1, 12728]
  __ strs(v3, Address(r20, 6924));    // str s3, [x20, 6924]

// pcrel
// LoadStoreOp
  __ ldr(r2, back);       // ldr x2, back
  __ ldrw(r29, __ pc());  // ldr w29, .

// LoadStoreOp
  __ prfm(Address(r14, 93));  // prfm PLDL1KEEP, [x14, 93]

// LoadStoreOp
  __ prfm(back);  // prfm PLDL1KEEP, back

// LoadStoreOp
  __ prfm(Address(r1, r7, Address::lsl(3)));  // prfm PLDL1KEEP, [x1, x7, lsl #3]

// LoadStoreOp
  __ prfm(Address(r17, 12288));  // prfm PLDL1KEEP, [x17, 12288]

// AddSubCarryOp
  __ adcw(r1, r24, r3);    // adc w1, w24, w3
  __ adcsw(r17, r24, r20); // adcs w17, w24, w20
  __ sbcw(r11, r0, r13);   // sbc w11, w0, w13
  __ sbcsw(r28, r10, r7);  // sbcs w28, w10, w7
  __ adc(r4, r15, r16);    // adc x4, x15, x16
  __ adcs(r2, r12, r20);   // adcs x2, x12, x20
  __ sbc(r29, r13, r13);   // sbc x29, x13, x13
  __ sbcs(r14, r6, r12);   // sbcs x14, x6, x12

// AddSubExtendedOp
  __ addw(r20, r12, r17, ext::sxtx, 4);  // add w20, w12, w17, sxtx #4
  __ addsw(r27, r11, r0, ext::uxtx, 3);  // adds w27, w11, w0, uxtx #3
  __ sub(r7, r1, r9, ext::sxtx, 4);      // sub x7, x1, x9, sxtx #4
  __ subsw(r3, r27, r1, ext::uxtb, 3);   // subs w3, w27, w1, uxtb #3
  __ add(r13, r26, r12, ext::sxth, 4);   // add x13, x26, x12, sxth #4
  __ adds(r17, r5, r10, ext::sxtb, 2);   // adds x17, x5, x10, sxtb #2
  __ sub(r30, r8, r15, ext::uxtw, 4);    // sub x30, x8, x15, uxtw #4
  __ subs(r19, r23, r19, ext::uxth, 4);  // subs x19, x23, x19, uxth #4

// ConditionalCompareOp
  __ ccmnw(r29, r5, 10u, Assembler::LO);  // ccmn w29, w5, #10, LO
  __ ccmpw(r9, r13, 11u, Assembler::LO);  // ccmp w9, w13, #11, LO
  __ ccmn(r10, r4, 6u, Assembler::HS);    // ccmn x10, x4, #6, HS
  __ ccmp(r12, r2, 12u, Assembler::HI);   // ccmp x12, x2, #12, HI

// ConditionalCompareImmedOp
  __ ccmnw(r16, 6, 2, Assembler::VS);   // ccmn w16, #6, #2, VS
  __ ccmpw(r7, 11, 13, Assembler::VS);  // ccmp w7, #11, #13, VS
  __ ccmn(r27, 10, 11, Assembler::LS);  // ccmn x27, #10, #11, LS
  __ ccmp(r3, 13, 13, Assembler::LE);   // ccmp x3, #13, #13, LE

// ConditionalSelectOp
  __ cselw(r26, r27, r10, Assembler::VS);   // csel w26, w27, w10, VS
  __ csincw(r10, r21, r28, Assembler::LE);  // csinc w10, w21, w28, LE
  __ csinvw(r23, r9, r27, Assembler::LE);   // csinv w23, w9, w27, LE
  __ csnegw(r10, r29, r15, Assembler::LE);  // csneg w10, w29, w15, LE
  __ csel(r30, r25, r21, Assembler::HS);    // csel x30, x25, x21, HS
  __ csinc(r0, r17, r21, Assembler::GT);    // csinc x0, x17, x21, GT
  __ csinv(r16, r21, r20, Assembler::CS);   // csinv x16, x21, x20, CS
  __ csneg(r19, r30, r3, Assembler::LS);    // csneg x19, x30, x3, LS

// TwoRegOp
  __ rbitw(r19, r11);   // rbit w19, w11
  __ rev16w(r24, r0);   // rev16 w24, w0
  __ revw(r27, r25);    // rev w27, w25
  __ clzw(r14, r3);     // clz w14, w3
  __ clsw(r14, r17);    // cls w14, w17
  __ rbit(r7, r15);     // rbit x7, x15
  __ rev16(r24, r28);   // rev16 x24, x28
  __ rev32(r17, r25);   // rev32 x17, x25
  __ rev(r2, r26);      // rev x2, x26
  __ clz(r28, r5);      // clz x28, x5
  __ cls(r25, r26);     // cls x25, x26
  __ pacia(r27, r16);   // pacia x27, x16
  __ pacib(r17, r6);    // pacib x17, x6
  __ pacda(r21, r12);   // pacda x21, x12
  __ pacdb(r0, r4);     // pacdb x0, x4
  __ autia(r12, r27);   // autia x12, x27
  __ autib(r17, r28);   // autib x17, x28
  __ autda(r28, r2);    // autda x28, x2
  __ autdb(r17, r10);   // autdb x17, x10
  __ braa(r15, r14);    // braa x15, x14
  __ brab(r14, r3);     // brab x14, x3
  __ blraa(r25, r15);   // blraa x25, x15
  __ blrab(r19, r14);   // blrab x19, x14

// ThreeRegOp
  __ udivw(r5, r16, r4);   // udiv w5, w16, w4
  __ sdivw(r26, r25, r4);  // sdiv w26, w25, w4
  __ lslvw(r2, r2, r12);   // lslv w2, w2, w12
  __ lsrvw(r29, r17, r8);  // lsrv w29, w17, w8
  __ asrvw(r7, r3, r4);    // asrv w7, w3, w4
  __ rorvw(r25, r4, r26);  // rorv w25, w4, w26
  __ udiv(r25, r4, r17);   // udiv x25, x4, x17
  __ sdiv(r0, r26, r17);   // sdiv x0, x26, x17
  __ lslv(r23, r15, r21);  // lslv x23, x15, x21
  __ lsrv(r28, r17, r27);  // lsrv x28, x17, x27
  __ asrv(r10, r3, r0);    // asrv x10, x3, x0
  __ rorv(r7, r25, r9);    // rorv x7, x25, x9
  __ umulh(r6, r15, r29);  // umulh x6, x15, x29
  __ smulh(r15, r10, r2);  // smulh x15, x10, x2

// FourRegMulOp
  __ maddw(r17, r7, r11, r11);   // madd w17, w7, w11, w11
  __ msubw(r23, r7, r29, r23);   // msub w23, w7, w29, w23
  __ madd(r14, r27, r11, r11);   // madd x14, x27, x11, x11
  __ msub(r4, r24, r12, r15);    // msub x4, x24, x12, x15
  __ smaddl(r14, r20, r11, r28); // smaddl x14, w20, w11, x28
  __ smsubl(r13, r11, r12, r23); // smsubl x13, w11, w12, x23
  __ umaddl(r30, r26, r14, r9);  // umaddl x30, w26, w14, x9
  __ umsubl(r13, r10, r7, r5);   // umsubl x13, w10, w7, x5

// ThreeRegFloatOp
  __ fabds(v29, v15, v3);   // fabd s29, s15, s3
  __ fmuls(v11, v12, v15);  // fmul s11, s12, s15
  __ fdivs(v30, v30, v17);  // fdiv s30, s30, s17
  __ fadds(v19, v20, v15);  // fadd s19, s20, s15
  __ fsubs(v15, v9, v21);   // fsub s15, s9, s21
  __ fabdd(v2, v9, v27);    // fabd d2, d9, d27
  __ fmuld(v7, v29, v30);   // fmul d7, d29, d30
  __ fdivd(v17, v1, v2);    // fdiv d17, d1, d2
  __ faddd(v6, v10, v3);    // fadd d6, d10, d3
  __ fsubd(v24, v11, v7);   // fsub d24, d11, d7

// FourRegFloatOp
  __ fmadds(v1, v11, v0, v3);     // fmadd s1, s11, s0, s3
  __ fmsubs(v17, v28, v6, v22);   // fmsub s17, s28, s6, s22
  __ fnmadds(v6, v0, v27, v26);   // fnmadd s6, s0, s27, s26
  __ fnmadds(v2, v5, v7, v28);    // fnmadd s2, s5, s7, s28
  __ fmaddd(v11, v25, v13, v11);  // fmadd d11, d25, d13, d11
  __ fmsubd(v23, v19, v8, v17);   // fmsub d23, d19, d8, d17
  __ fnmaddd(v21, v25, v20, v19); // fnmadd d21, d25, d20, d19
  __ fnmaddd(v17, v2, v29, v22);  // fnmadd d17, d2, d29, d22

// TwoRegFloatOp
  __ fmovs(v8, v21);    // fmov s8, s21
  __ fabss(v19, v20);   // fabs s19, s20
  __ fnegs(v11, v17);   // fneg s11, s17
  __ fsqrts(v20, v6);   // fsqrt s20, s6
  __ fcvts(v15, v3);    // fcvt d15, s3
  __ fcvtsh(v3, v28);   // fcvt h3, s28
  __ fcvths(v3, v27);   // fcvt s3, h27
  __ fmovd(v14, v14);   // fmov d14, d14
  __ fabsd(v10, v12);   // fabs d10, d12
  __ fnegd(v11, v17);   // fneg d11, d17
  __ fsqrtd(v10, v25);  // fsqrt d10, d25
  __ fcvtd(v7, v7);     // fcvt s7, d7

// FloatConvertOp
  __ fcvtzsw(r14, v28);   // fcvtzs w14, s28
  __ fcvtzs(r0, v22);     // fcvtzs x0, s22
  __ fcvtzdw(r0, v12);    // fcvtzs w0, d12
  __ fcvtzd(r23, v13);    // fcvtzs x23, d13
  __ scvtfws(v13, r7);    // scvtf s13, w7
  __ scvtfs(v14, r7);     // scvtf s14, x7
  __ scvtfwd(v8, r20);    // scvtf d8, w20
  __ scvtfd(v17, r28);    // scvtf d17, x28
  __ fcvtassw(r30, v16);  // fcvtas w30, s16
  __ fcvtasd(r2, v9);     // fcvtas x2, d9
  __ fcvtmssw(r16, v20);  // fcvtms w16, s20
  __ fcvtmsd(r29, v4);    // fcvtms x29, d4
  __ fmovs(r1, v26);      // fmov w1, s26
  __ fmovd(r24, v23);     // fmov x24, d23
  __ fmovs(v14, r21);     // fmov s14, w21
  __ fmovd(v12, r5);      // fmov d12, x5

// TwoRegFloatOp
  __ fcmps(v12, v24);   // fcmp s12, s24
  __ fcmpd(v24, v29);   // fcmp d24, d29
  __ fcmps(v27, 0.0);   // fcmp s27, #0.0
  __ fcmpd(v21, 0.0);   // fcmp d21, #0.0

// LoadStorePairOp
  __ stpw(r22, r5, Address(r28, -48));   // stp w22, w5, [x28, #-48]
  __ ldpw(r19, r27, Address(r19, 16));   // ldp w19, w27, [x19, #16]
  __ ldpsw(r28, r26, Address(r7, -32));  // ldpsw x28, x26, [x7, #-32]
  __ stp(r6, r1, Address(r4, -48));      // stp x6, x1, [x4, #-48]
  __ ldp(r26, r23, Address(r21, -80));   // ldp x26, x23, [x21, #-80]

// LoadStorePairOp
  __ stpw(r20, r30, Address(__ pre(r9, -96)));   // stp w20, w30, [x9, #-96]!
  __ ldpw(r13, r20, Address(__ pre(r26, 16)));   // ldp w13, w20, [x26, #16]!
  __ ldpsw(r29, r11, Address(__ pre(r13, -80))); // ldpsw x29, x11, [x13, #-80]!
  __ stp(r27, r21, Address(__ pre(r5, -48)));    // stp x27, x21, [x5, #-48]!
  __ ldp(r6, r0, Address(__ pre(r30, 80)));      // ldp x6, x0, [x30, #80]!

// LoadStorePairOp
  __ stpw(r19, r15, Address(__ post(r16, -208)));  // stp w19, w15, [x16], #-208
  __ ldpw(r12, r23, Address(__ post(r9, -240)));   // ldp w12, w23, [x9], #-240
  __ ldpsw(r0, r26, Address(__ post(r15, 32)));    // ldpsw x0, x26, [x15], #32
  __ stp(r8, r17, Address(__ post(r26, -208)));    // stp x8, x17, [x26], #-208
  __ ldp(r25, r7, Address(__ post(r2, -176)));     // ldp x25, x7, [x2], #-176

// LoadStorePairOp
  __ stnpw(r19, r17, Address(r1, -208));  // stnp w19, w17, [x1, #-208]
  __ ldnpw(r0, r13, Address(r22, 128));   // ldnp w0, w13, [x22, #128]
  __ stnp(r29, r23, Address(r27, 0));     // stnp x29, x23, [x27, #0]
  __ ldnp(r11, r10, Address(r8, -224));   // ldnp x11, x10, [x8, #-224]

// LdStNEONOp
  __ ld1(v0, __ T8B, Address(r11));                             // ld1 {v0.8B}, [x11]
  __ ld1(v16, v17, __ T16B, Address(__ post(r26, 32)));         // ld1 {v16.16B, v17.16B}, [x26], 32
  __ ld1(v21, v22, v23, __ T1D, Address(__ post(r26, r17)));    // ld1 {v21.1D, v22.1D, v23.1D}, [x26], x17
  __ ld1(v26, v27, v28, v29, __ T8H, Address(__ post(r29, 64))); // ld1 {v26.8H, v27.8H, v28.8H, v29.8H}, [x29], 64
  __ ld1r(v21, __ T8B, Address(r6));                            // ld1r {v21.8B}, [x6]
  __ ld1r(v13, __ T4S, Address(__ post(r29, 4)));               // ld1r {v13.4S}, [x29], 4
  __ ld1r(v21, __ T1D, Address(__ post(r12, r16)));             // ld1r {v21.1D}, [x12], x16
  __ ld2(v1, v2, __ T2D, Address(r0));                          // ld2 {v1.2D, v2.2D}, [x0]
  __ ld2(v9, v10, __ T4H, Address(__ post(r21, 16)));           // ld2 {v9.4H, v10.4H}, [x21], 16
  __ ld2r(v7, v8, __ T16B, Address(r25));                       // ld2r {v7.16B, v8.16B}, [x25]
  __ ld2r(v8, v9, __ T2S, Address(__ post(r9, 8)));             // ld2r {v8.2S, v9.2S}, [x9], 8
  __ ld2r(v9, v10, __ T2D, Address(__ post(r12, r14)));         // ld2r {v9.2D, v10.2D}, [x12], x14
  __ ld3(v7, v8, v9, __ T4S, Address(__ post(r4, r17)));        // ld3 {v7.4S, v8.4S, v9.4S}, [x4], x17
  __ ld3(v23, v24, v25, __ T2S, Address(r17));                  // ld3 {v23.2S, v24.2S, v25.2S}, [x17]
  __ ld3r(v3, v4, v5, __ T8H, Address(r22));                    // ld3r {v3.8H, v4.8H, v5.8H}, [x22]
  __ ld3r(v12, v13, v14, __ T4S, Address(__ post(r2, 12)));     // ld3r {v12.4S, v13.4S, v14.4S}, [x2], 12
  __ ld3r(v15, v16, v17, __ T1D, Address(__ post(r10, r12)));   // ld3r {v15.1D, v16.1D, v17.1D}, [x10], x12
  __ ld4(v4, v5, v6, v7, __ T8H, Address(__ post(r2, 64)));     // ld4 {v4.8H, v5.8H, v6.8H, v7.8H}, [x2], 64
  __ ld4(v6, v7, v8, v9, __ T8B, Address(__ post(r20, r11)));   // ld4 {v6.8B, v7.8B, v8.8B, v9.8B}, [x20], x11
  __ ld4r(v11, v12, v13, v14, __ T8B, Address(r12));            // ld4r {v11.8B, v12.8B, v13.8B, v14.8B}, [x12]
  __ ld4r(v15, v16, v17, v18, __ T4H, Address(__ post(r17, 8))); // ld4r {v15.4H, v16.4H, v17.4H, v18.4H}, [x17], 8
  __ ld4r(v14, v15, v16, v17, __ T2S, Address(__ post(r25, r16))); // ld4r {v14.2S, v15.2S, v16.2S, v17.2S}, [x25], x16

// NEONReduceInstruction
  __ addv(v20, __ T8B, v21);   // addv b20, v21.8B
  __ addv(v1, __ T16B, v2);    // addv b1, v2.16B
  __ addv(v22, __ T4H, v23);   // addv h22, v23.4H
  __ addv(v30, __ T8H, v31);   // addv h30, v31.8H
  __ addv(v14, __ T4S, v15);   // addv s14, v15.4S
  __ smaxv(v2, __ T8B, v3);    // smaxv b2, v3.8B
  __ smaxv(v6, __ T16B, v7);   // smaxv b6, v7.16B
  __ smaxv(v3, __ T4H, v4);    // smaxv h3, v4.4H
  __ smaxv(v7, __ T8H, v8);    // smaxv h7, v8.8H
  __ smaxv(v24, __ T4S, v25);  // smaxv s24, v25.4S
  __ fmaxv(v0, __ T4S, v1);    // fmaxv s0, v1.4S
  __ sminv(v27, __ T8B, v28);  // sminv b27, v28.8B
  __ uminv(v29, __ T8B, v30);  // uminv b29, v30.8B
  __ sminv(v5, __ T16B, v6);   // sminv b5, v6.16B
  __ uminv(v5, __ T16B, v6);   // uminv b5, v6.16B
  __ sminv(v29, __ T4H, v30);  // sminv h29, v30.4H
  __ uminv(v11, __ T4H, v12);  // uminv h11, v12.4H
  __ sminv(v25, __ T8H, v26);  // sminv h25, v26.8H
  __ uminv(v0, __ T8H, v1);    // uminv h0, v1.8H
  __ sminv(v30, __ T4S, v31);  // sminv s30, v31.4S
  __ uminv(v0, __ T4S, v1);    // uminv s0, v1.4S
  __ fminv(v17, __ T4S, v18);  // fminv s17, v18.4S
  __ fmaxp(v28, v29, __ S);    // fmaxp s28, v29.2S
  __ fmaxp(v25, v26, __ D);    // fmaxp d25, v26.2D
  __ fminp(v9, v10, __ S);     // fminp s9, v10.2S
  __ fminp(v25, v26, __ D);    // fminp d25, v26.2D

// NEONFloatCompareWithZero
  __ fcm(Assembler::GT, v12, __ T2S, v13);  // fcmgt v12.2S, v13.2S, #0.0
  __ fcm(Assembler::GT, v15, __ T4S, v16);  // fcmgt v15.4S, v16.4S, #0.0
  __ fcm(Assembler::GT, v11, __ T2D, v12);  // fcmgt v11.2D, v12.2D, #0.0
  __ fcm(Assembler::GE, v10, __ T2S, v11);  // fcmge v10.2S, v11.2S, #0.0
  __ fcm(Assembler::GE, v17, __ T4S, v18);  // fcmge v17.4S, v18.4S, #0.0
  __ fcm(Assembler::GE, v24, __ T2D, v25);  // fcmge v24.2D, v25.2D, #0.0
  __ fcm(Assembler::EQ, v21, __ T2S, v22);  // fcmeq v21.2S, v22.2S, #0.0
  __ fcm(Assembler::EQ, v23, __ T4S, v24);  // fcmeq v23.4S, v24.4S, #0.0
  __ fcm(Assembler::EQ, v0, __ T2D, v1);    // fcmeq v0.2D, v1.2D, #0.0
  __ fcm(Assembler::LT, v16, __ T2S, v17);  // fcmlt v16.2S, v17.2S, #0.0
  __ fcm(Assembler::LT, v10, __ T4S, v11);  // fcmlt v10.4S, v11.4S, #0.0
  __ fcm(Assembler::LT, v6, __ T2D, v7);    // fcmlt v6.2D, v7.2D, #0.0
  __ fcm(Assembler::LE, v28, __ T2S, v29);  // fcmle v28.2S, v29.2S, #0.0
  __ fcm(Assembler::LE, v6, __ T4S, v7);    // fcmle v6.4S, v7.4S, #0.0
  __ fcm(Assembler::LE, v5, __ T2D, v6);    // fcmle v5.2D, v6.2D, #0.0

// TwoRegNEONOp
  __ absr(v5, __ T8B, v6);     // abs v5.8B, v6.8B
  __ absr(v20, __ T16B, v21);  // abs v20.16B, v21.16B
  __ absr(v17, __ T4H, v18);   // abs v17.4H, v18.4H
  __ absr(v15, __ T8H, v16);   // abs v15.8H, v16.8H
  __ absr(v17, __ T2S, v18);   // abs v17.2S, v18.2S
  __ absr(v29, __ T4S, v30);   // abs v29.4S, v30.4S
  __ absr(v26, __ T2D, v27);   // abs v26.2D, v27.2D
  __ fabs(v28, __ T2S, v29);   // fabs v28.2S, v29.2S
  __ fabs(v1, __ T4S, v2);     // fabs v1.4S, v2.4S
  __ fabs(v27, __ T2D, v28);   // fabs v27.2D, v28.2D
  __ fneg(v0, __ T2S, v1);     // fneg v0.2S, v1.2S
  __ fneg(v20, __ T4S, v21);   // fneg v20.4S, v21.4S
  __ fneg(v28, __ T2D, v29);   // fneg v28.2D, v29.2D
  __ fsqrt(v15, __ T2S, v16);  // fsqrt v15.2S, v16.2S
  __ fsqrt(v12, __ T4S, v13);  // fsqrt v12.4S, v13.4S
  __ fsqrt(v10, __ T2D, v11);  // fsqrt v10.2D, v11.2D
  __ notr(v28, __ T8B, v29);   // not v28.8B, v29.8B
  __ notr(v28, __ T16B, v29);  // not v28.16B, v29.16B

// ThreeRegNEONOp
  __ andr(v19, __ T8B, v20, v21);   // and v19.8B, v20.8B, v21.8B
  __ andr(v22, __ T16B, v23, v24);  // and v22.16B, v23.16B, v24.16B
  __ orr(v10, __ T8B, v11, v12);    // orr v10.8B, v11.8B, v12.8B
  __ orr(v4, __ T16B, v5, v6);      // orr v4.16B, v5.16B, v6.16B
  __ eor(v30, __ T8B, v31, v0);     // eor v30.8B, v31.8B, v0.8B
  __ eor(v20, __ T16B, v21, v22);   // eor v20.16B, v21.16B, v22.16B
  __ addv(v8, __ T8B, v9, v10);     // add v8.8B, v9.8B, v10.8B
  __ addv(v30, __ T16B, v31, v0);   // add v30.16B, v31.16B, v0.16B
  __ addv(v17, __ T4H, v18, v19);   // add v17.4H, v18.4H, v19.4H
  __ addv(v10, __ T8H, v11, v12);   // add v10.8H, v11.8H, v12.8H
  __ addv(v27, __ T2S, v28, v29);   // add v27.2S, v28.2S, v29.2S
  __ addv(v2, __ T4S, v3, v4);      // add v2.4S, v3.4S, v4.4S
  __ addv(v24, __ T2D, v25, v26);   // add v24.2D, v25.2D, v26.2D
  __ fadd(v4, __ T2S, v5, v6);      // fadd v4.2S, v5.2S, v6.2S
  __ fadd(v3, __ T4S, v4, v5);      // fadd v3.4S, v4.4S, v5.4S
  __ fadd(v8, __ T2D, v9, v10);     // fadd v8.2D, v9.2D, v10.2D
  __ subv(v22, __ T8B, v23, v24);   // sub v22.8B, v23.8B, v24.8B
  __ subv(v17, __ T16B, v18, v19);  // sub v17.16B, v18.16B, v19.16B
  __ subv(v13, __ T4H, v14, v15);   // sub v13.4H, v14.4H, v15.4H
  __ subv(v4, __ T8H, v5, v6);      // sub v4.8H, v5.8H, v6.8H
  __ subv(v28, __ T2S, v29, v30);   // sub v28.2S, v29.2S, v30.2S
  __ subv(v23, __ T4S, v24, v25);   // sub v23.4S, v24.4S, v25.4S
  __ subv(v21, __ T2D, v22, v23);   // sub v21.2D, v22.2D, v23.2D
  __ fsub(v25, __ T2S, v26, v27);   // fsub v25.2S, v26.2S, v27.2S
  __ fsub(v24, __ T4S, v25, v26);   // fsub v24.4S, v25.4S, v26.4S
  __ fsub(v3, __ T2D, v4, v5);      // fsub v3.2D, v4.2D, v5.2D
  __ mulv(v23, __ T8B, v24, v25);   // mul v23.8B, v24.8B, v25.8B
  __ mulv(v26, __ T16B, v27, v28);  // mul v26.16B, v27.16B, v28.16B
  __ mulv(v23, __ T4H, v24, v25);   // mul v23.4H, v24.4H, v25.4H
  __ mulv(v14, __ T8H, v15, v16);   // mul v14.8H, v15.8H, v16.8H
  __ mulv(v21, __ T2S, v22, v23);   // mul v21.2S, v22.2S, v23.2S
  __ mulv(v3, __ T4S, v4, v5);      // mul v3.4S, v4.4S, v5.4S
  __ fabd(v23, __ T2S, v24, v25);   // fabd v23.2S, v24.2S, v25.2S
  __ fabd(v8, __ T4S, v9, v10);     // fabd v8.4S, v9.4S, v10.4S
  __ fabd(v24, __ T2D, v25, v26);   // fabd v24.2D, v25.2D, v26.2D
  __ faddp(v19, __ T2S, v20, v21);  // faddp v19.2S, v20.2S, v21.2S
  __ faddp(v15, __ T4S, v16, v17);  // faddp v15.4S, v16.4S, v17.4S
  __ faddp(v16, __ T2D, v17, v18);  // faddp v16.2D, v17.2D, v18.2D
  __ fmul(v2, __ T2S, v3, v4);      // fmul v2.2S, v3.2S, v4.2S
  __ fmul(v1, __ T4S, v2, v3);      // fmul v1.4S, v2.4S, v3.4S
  __ fmul(v0, __ T2D, v1, v2);      // fmul v0.2D, v1.2D, v2.2D
  __ mlav(v24, __ T4H, v25, v26);   // mla v24.4H, v25.4H, v26.4H
  __ mlav(v4, __ T8H, v5, v6);      // mla v4.8H, v5.8H, v6.8H
  __ mlav(v3, __ T2S, v4, v5);      // mla v3.2S, v4.2S, v5.2S
  __ mlav(v11, __ T4S, v12, v13);   // mla v11.4S, v12.4S, v13.4S
  __ fmla(v30, __ T2S, v31, v0);    // fmla v30.2S, v31.2S, v0.2S
  __ fmla(v27, __ T4S, v28, v29);   // fmla v27.4S, v28.4S, v29.4S
  __ fmla(v9, __ T2D, v10, v11);    // fmla v9.2D, v10.2D, v11.2D
  __ mlsv(v25, __ T4H, v26, v27);   // mls v25.4H, v26.4H, v27.4H
  __ mlsv(v2, __ T8H, v3, v4);      // mls v2.8H, v3.8H, v4.8H
  __ mlsv(v12, __ T2S, v13, v14);   // mls v12.2S, v13.2S, v14.2S
  __ mlsv(v17, __ T4S, v18, v19);   // mls v17.4S, v18.4S, v19.4S
  __ fmls(v30, __ T2S, v31, v0);    // fmls v30.2S, v31.2S, v0.2S
  __ fmls(v1, __ T4S, v2, v3);      // fmls v1.4S, v2.4S, v3.4S
  __ fmls(v12, __ T2D, v13, v14);   // fmls v12.2D, v13.2D, v14.2D
  __ fdiv(v28, __ T2S, v29, v30);   // fdiv v28.2S, v29.2S, v30.2S
  __ fdiv(v0, __ T4S, v1, v2);      // fdiv v0.4S, v1.4S, v2.4S
  __ fdiv(v17, __ T2D, v18, v19);   // fdiv v17.2D, v18.2D, v19.2D
  __ maxv(v12, __ T8B, v13, v14);   // smax v12.8B, v13.8B, v14.8B
  __ maxv(v17, __ T16B, v18, v19);  // smax v17.16B, v18.16B, v19.16B
  __ maxv(v21, __ T4H, v22, v23);   // smax v21.4H, v22.4H, v23.4H
  __ maxv(v12, __ T8H, v13, v14);   // smax v12.8H, v13.8H, v14.8H
  __ maxv(v27, __ T2S, v28, v29);   // smax v27.2S, v28.2S, v29.2S
  __ maxv(v29, __ T4S, v30, v31);   // smax v29.4S, v30.4S, v31.4S
  __ smaxp(v30, __ T8B, v31, v0);   // smaxp v30.8B, v31.8B, v0.8B
  __ smaxp(v1, __ T16B, v2, v3);    // smaxp v1.16B, v2.16B, v3.16B
  __ smaxp(v25, __ T4H, v26, v27);  // smaxp v25.4H, v26.4H, v27.4H
  __ smaxp(v27, __ T8H, v28, v29);  // smaxp v27.8H, v28.8H, v29.8H
  __ smaxp(v4, __ T2S, v5, v6);     // smaxp v4.2S, v5.2S, v6.2S
  __ smaxp(v29, __ T4S, v30, v31);  // smaxp v29.4S, v30.4S, v31.4S
  __ fmax(v3, __ T2S, v4, v5);      // fmax v3.2S, v4.2S, v5.2S
  __ fmax(v6, __ T4S, v7, v8);      // fmax v6.4S, v7.4S, v8.4S
  __ fmax(v29, __ T2D, v30, v31);   // fmax v29.2D, v30.2D, v31.2D
  __ minv(v25, __ T8B, v26, v27);   // smin v25.8B, v26.8B, v27.8B
  __ minv(v17, __ T16B, v18, v19);  // smin v17.16B, v18.16B, v19.16B
  __ minv(v8, __ T4H, v9, v10);     // smin v8.4H, v9.4H, v10.4H
  __ minv(v7, __ T8H, v8, v9);      // smin v7.8H, v8.8H, v9.8H
  __ minv(v12, __ T2S, v13, v14);   // smin v12.2S, v13.2S, v14.2S
  __ minv(v0, __ T4S, v1, v2);      // smin v0.4S, v1.4S, v2.4S
  __ sminp(v19, __ T8B, v20, v21);  // sminp v19.8B, v20.8B, v21.8B
  __ sminp(v1, __ T16B, v2, v3);    // sminp v1.16B, v2.16B, v3.16B
  __ sminp(v23, __ T4H, v24, v25);  // sminp v23.4H, v24.4H, v25.4H
  __ sminp(v2, __ T8H, v3, v4);     // sminp v2.8H, v3.8H, v4.8H
  __ sminp(v0, __ T2S, v1, v2);     // sminp v0.2S, v1.2S, v2.2S
  __ sminp(v8, __ T4S, v9, v10);    // sminp v8.4S, v9.4S, v10.4S
  __ fmin(v23, __ T2S, v24, v25);   // fmin v23.2S, v24.2S, v25.2S
  __ fmin(v25, __ T4S, v26, v27);   // fmin v25.4S, v26.4S, v27.4S
  __ fmin(v15, __ T2D, v16, v17);   // fmin v15.2D, v16.2D, v17.2D
  __ cmeq(v29, __ T8B, v30, v31);   // cmeq v29.8B, v30.8B, v31.8B
  __ cmeq(v3, __ T16B, v4, v5);     // cmeq v3.16B, v4.16B, v5.16B
  __ cmeq(v10, __ T4H, v11, v12);   // cmeq v10.4H, v11.4H, v12.4H
  __ cmeq(v22, __ T8H, v23, v24);   // cmeq v22.8H, v23.8H, v24.8H
  __ cmeq(v10, __ T2S, v11, v12);   // cmeq v10.2S, v11.2S, v12.2S
  __ cmeq(v4, __ T4S, v5, v6);      // cmeq v4.4S, v5.4S, v6.4S
  __ cmeq(v17, __ T2D, v18, v19);   // cmeq v17.2D, v18.2D, v19.2D
  __ fcmeq(v1, __ T2S, v2, v3);     // fcmeq v1.2S, v2.2S, v3.2S
  __ fcmeq(v11, __ T4S, v12, v13);  // fcmeq v11.4S, v12.4S, v13.4S
  __ fcmeq(v7, __ T2D, v8, v9);     // fcmeq v7.2D, v8.2D, v9.2D
  __ cmgt(v10, __ T8B, v11, v12);   // cmgt v10.8B, v11.8B, v12.8B
  __ cmgt(v15, __ T16B, v16, v17);  // cmgt v15.16B, v16.16B, v17.16B
  __ cmgt(v16, __ T4H, v17, v18);   // cmgt v16.4H, v17.4H, v18.4H
  __ cmgt(v2, __ T8H, v3, v4);      // cmgt v2.8H, v3.8H, v4.8H
  __ cmgt(v9, __ T2S, v10, v11);    // cmgt v9.2S, v10.2S, v11.2S
  __ cmgt(v11, __ T4S, v12, v13);   // cmgt v11.4S, v12.4S, v13.4S
  __ cmgt(v12, __ T2D, v13, v14);   // cmgt v12.2D, v13.2D, v14.2D
  __ cmhi(v14, __ T8B, v15, v16);   // cmhi v14.8B, v15.8B, v16.8B
  __ cmhi(v13, __ T16B, v14, v15);  // cmhi v13.16B, v14.16B, v15.16B
  __ cmhi(v2, __ T4H, v3, v4);      // cmhi v2.4H, v3.4H, v4.4H
  __ cmhi(v6, __ T8H, v7, v8);      // cmhi v6.8H, v7.8H, v8.8H
  __ cmhi(v19, __ T2S, v20, v21);   // cmhi v19.2S, v20.2S, v21.2S
  __ cmhi(v25, __ T4S, v26, v27);   // cmhi v25.4S, v26.4S, v27.4S
  __ cmhi(v15, __ T2D, v16, v17);   // cmhi v15.2D, v16.2D, v17.2D
  __ cmhs(v4, __ T8B, v5, v6);      // cmhs v4.8B, v5.8B, v6.8B
  __ cmhs(v2, __ T16B, v3, v4);     // cmhs v2.16B, v3.16B, v4.16B
  __ cmhs(v4, __ T4H, v5, v6);      // cmhs v4.4H, v5.4H, v6.4H
  __ cmhs(v11, __ T8H, v12, v13);   // cmhs v11.8H, v12.8H, v13.8H
  __ cmhs(v17, __ T2S, v18, v19);   // cmhs v17.2S, v18.2S, v19.2S
  __ cmhs(v20, __ T4S, v21, v22);   // cmhs v20.4S, v21.4S, v22.4S
  __ cmhs(v16, __ T2D, v17, v18);   // cmhs v16.2D, v17.2D, v18.2D
  __ fcmgt(v17, __ T2S, v18, v19);  // fcmgt v17.2S, v18.2S, v19.2S
  __ fcmgt(v10, __ T4S, v11, v12);  // fcmgt v10.4S, v11.4S, v12.4S
  __ fcmgt(v20, __ T2D, v21, v22);  // fcmgt v20.2D, v21.2D, v22.2D
  __ cmge(v22, __ T8B, v23, v24);   // cmge v22.8B, v23.8B, v24.8B
  __ cmge(v12, __ T16B, v13, v14);  // cmge v12.16B, v13.16B, v14.16B
  __ cmge(v25, __ T4H, v26, v27);   // cmge v25.4H, v26.4H, v27.4H
  __ cmge(v23, __ T8H, v24, v25);   // cmge v23.8H, v24.8H, v25.8H
  __ cmge(v28, __ T2S, v29, v30);   // cmge v28.2S, v29.2S, v30.2S
  __ cmge(v14, __ T4S, v15, v16);   // cmge v14.4S, v15.4S, v16.4S
  __ cmge(v10, __ T2D, v11, v12);   // cmge v10.2D, v11.2D, v12.2D
  __ fcmge(v24, __ T2S, v25, v26);  // fcmge v24.2S, v25.2S, v26.2S
  __ fcmge(v1, __ T4S, v2, v3);     // fcmge v1.4S, v2.4S, v3.4S
  __ fcmge(v11, __ T2D, v12, v13);  // fcmge v11.2D, v12.2D, v13.2D
  __ facgt(v30, __ T2S, v31, v0);   // facgt v30.2S, v31.2S, v0.2S
  __ facgt(v10, __ T4S, v11, v12);  // facgt v10.4S, v11.4S, v12.4S
  __ facgt(v15, __ T2D, v16, v17);  // facgt v15.2D, v16.2D, v17.2D

// SVEComparisonWithZero
  __ sve_fcm(Assembler::EQ, p3, __ S, p3, z2, 0.0);    // fcmeq p3.s, p3/z, z2.s, #0.0
  __ sve_fcm(Assembler::GT, p9, __ D, p0, z16, 0.0);   // fcmgt p9.d, p0/z, z16.d, #0.0
  __ sve_fcm(Assembler::GE, p0, __ D, p1, z11, 0.0);   // fcmge p0.d, p1/z, z11.d, #0.0
  __ sve_fcm(Assembler::LT, p4, __ D, p7, z14, 0.0);   // fcmlt p4.d, p7/z, z14.d, #0.0
  __ sve_fcm(Assembler::LE, p0, __ S, p5, z20, 0.0);   // fcmle p0.s, p5/z, z20.s, #0.0
  __ sve_fcm(Assembler::NE, p11, __ D, p6, z27, 0.0);  // fcmne p11.d, p6/z, z27.d, #0.0

// SpecialCases
  __ ccmn(zr, zr, 3u, Assembler::LE);               // ccmn xzr, xzr, #3, LE
  __ ccmnw(zr, zr, 5u, Assembler::EQ);              // ccmn wzr, wzr, #5, EQ
  __ ccmp(zr, 1, 4u, Assembler::NE);                // ccmp xzr, 1, #4, NE
  __ ccmpw(zr, 2, 2, Assembler::GT);                // ccmp wzr, 2, #2, GT
  __ extr(zr, zr, zr, 0);                           // extr xzr, xzr, xzr, 0
  __ stlxp(r0, zr, zr, sp);                         // stlxp w0, xzr, xzr, [sp]
  __ stlxpw(r2, zr, zr, r3);                        // stlxp w2, wzr, wzr, [x3]
  __ stxp(r4, zr, zr, r5);                          // stxp w4, xzr, xzr, [x5]
  __ stxpw(r6, zr, zr, sp);                         // stxp w6, wzr, wzr, [sp]
  __ dup(v0, __ T16B, zr);                          // dup v0.16b, wzr
  __ dup(v0, __ S, v1);                             // dup s0, v1.s[0]
  __ mov(v1, __ D, 0, zr);                          // mov v1.d[0], xzr
  __ mov(v1, __ S, 1, zr);                          // mov v1.s[1], wzr
  __ mov(v1, __ H, 2, zr);                          // mov v1.h[2], wzr
  __ mov(v1, __ B, 3, zr);                          // mov v1.b[3], wzr
  __ smov(r0, v1, __ S, 0);                         // smov x0, v1.s[0]
  __ smov(r0, v1, __ H, 1);                         // smov x0, v1.h[1]
  __ smov(r0, v1, __ B, 2);                         // smov x0, v1.b[2]
  __ umov(r0, v1, __ D, 0);                         // umov x0, v1.d[0]
  __ umov(r0, v1, __ S, 1);                         // umov w0, v1.s[1]
  __ umov(r0, v1, __ H, 2);                         // umov w0, v1.h[2]
  __ umov(r0, v1, __ B, 3);                         // umov w0, v1.b[3]
  __ fmovhid(r0, v1);                               // fmov x0, v1.d[1]
  __ fmovs(v9, __ T2S, 0.5f);                       // fmov v9.2s, 0.5
  __ fmovd(v14, __ T2D, 0.5f);                      // fmov v14.2d, 0.5
  __ ld1(v31, v0, __ T2D, Address(__ post(r1, r0))); // ld1 {v31.2d, v0.2d}, [x1], x0
  __ fcvtzs(v0, __ T2S, v1);                        // fcvtzs v0.2s, v1.2s
  __ fcvtas(v2, __ T4S, v3);                        // fcvtas v2.4s, v3.4s
  __ fcvtms(v4, __ T2D, v5);                        // fcvtms v4.2d, v5.2d
  __ sve_cpy(z0, __ S, p0, v1);                     // mov z0.s, p0/m, s1
  __ sve_cpy(z0, __ B, p0, 127, true);              // mov z0.b, p0/m, 127
  __ sve_cpy(z1, __ H, p0, -128, true);             // mov z1.h, p0/m, -128
  __ sve_cpy(z2, __ S, p0, 32512, true);            // mov z2.s, p0/m, 32512
  __ sve_cpy(z5, __ D, p0, -32768, false);          // mov z5.d, p0/z, -32768
  __ sve_cpy(z10, __ B, p0, -1, false);             // mov z10.b, p0/z, -1
  __ sve_cpy(z11, __ S, p0, -1, false);             // mov z11.s, p0/z, -1
  __ sve_inc(r0, __ S);                             // incw x0
  __ sve_dec(r1, __ H);                             // dech x1
  __ sve_lsl(z0, __ B, z1, 7);                      // lsl z0.b, z1.b, #7
  __ sve_lsl(z21, __ H, z1, 15);                    // lsl z21.h, z1.h, #15
  __ sve_lsl(z0, __ S, z1, 31);                     // lsl z0.s, z1.s, #31
  __ sve_lsl(z0, __ D, z1, 63);                     // lsl z0.d, z1.d, #63
  __ sve_lsr(z0, __ B, z1, 7);                      // lsr z0.b, z1.b, #7
  __ sve_asr(z0, __ H, z11, 15);                    // asr z0.h, z11.h, #15
  __ sve_lsr(z30, __ S, z1, 31);                    // lsr z30.s, z1.s, #31
  __ sve_asr(z0, __ D, z1, 63);                     // asr z0.d, z1.d, #63
  __ sve_lsl(z0, __ B, p0, 0);                      // lsl z0.b, p0/m, z0.b, #0
  __ sve_lsl(z0, __ B, p0, 5);                      // lsl z0.b, p0/m, z0.b, #5
  __ sve_lsl(z1, __ H, p1, 15);                     // lsl z1.h, p1/m, z1.h, #15
  __ sve_lsl(z2, __ S, p2, 31);                     // lsl z2.s, p2/m, z2.s, #31
  __ sve_lsl(z3, __ D, p3, 63);                     // lsl z3.d, p3/m, z3.d, #63
  __ sve_lsr(z0, __ B, p0, 1);                      // lsr z0.b, p0/m, z0.b, #1
  __ sve_lsr(z0, __ B, p0, 8);                      // lsr z0.b, p0/m, z0.b, #8
  __ sve_lsr(z1, __ H, p1, 15);                     // lsr z1.h, p1/m, z1.h, #15
  __ sve_lsr(z2, __ S, p2, 7);                      // lsr z2.s, p2/m, z2.s, #7
  __ sve_lsr(z2, __ S, p2, 31);                     // lsr z2.s, p2/m, z2.s, #31
  __ sve_lsr(z3, __ D, p3, 63);                     // lsr z3.d, p3/m, z3.d, #63
  __ sve_asr(z0, __ B, p0, 1);                      // asr z0.b, p0/m, z0.b, #1
  __ sve_asr(z0, __ B, p0, 7);                      // asr z0.b, p0/m, z0.b, #7
  __ sve_asr(z1, __ H, p1, 5);                      // asr z1.h, p1/m, z1.h, #5
  __ sve_asr(z1, __ H, p1, 15);                     // asr z1.h, p1/m, z1.h, #15
  __ sve_asr(z2, __ S, p2, 31);                     // asr z2.s, p2/m, z2.s, #31
  __ sve_asr(z3, __ D, p3, 63);                     // asr z3.d, p3/m, z3.d, #63
  __ sve_addvl(sp, r0, 31);                         // addvl sp, x0, #31
  __ sve_addpl(r1, sp, -32);                        // addpl x1, sp, -32
  __ sve_cntp(r8, __ B, p0, p1);                    // cntp x8, p0, p1.b
  __ sve_dup(z0, __ B, 127);                        // dup z0.b, 127
  __ sve_dup(z1, __ H, -128);                       // dup z1.h, -128
  __ sve_dup(z2, __ S, 32512);                      // dup z2.s, 32512
  __ sve_dup(z7, __ D, -32768);                     // dup z7.d, -32768
  __ sve_dup(z10, __ B, -1);                        // dup z10.b, -1
  __ sve_dup(z11, __ S, -1);                        // dup z11.s, -1
  __ sve_ld1b(z0, __ B, p0, Address(sp));           // ld1b {z0.b}, p0/z, [sp]
  __ sve_ld1b(z0, __ H, p1, Address(sp));           // ld1b {z0.h}, p1/z, [sp]
  __ sve_ld1b(z0, __ S, p2, Address(sp, r8));       // ld1b {z0.s}, p2/z, [sp, x8]
  __ sve_ld1b(z0, __ D, p3, Address(sp, 7));        // ld1b {z0.d}, p3/z, [sp, #7, MUL VL]
  __ sve_ld1h(z10, __ H, p1, Address(sp, -8));      // ld1h {z10.h}, p1/z, [sp, #-8, MUL VL]
  __ sve_ld1w(z20, __ S, p2, Address(r0, 7));       // ld1w {z20.s}, p2/z, [x0, #7, MUL VL]
  __ sve_ld1b(z30, __ B, p3, Address(sp, r8));      // ld1b {z30.b}, p3/z, [sp, x8]
  __ sve_ld1w(z0, __ S, p4, Address(sp, r28));      // ld1w {z0.s}, p4/z, [sp, x28, LSL #2]
  __ sve_ld1d(z11, __ D, p5, Address(r0, r1));      // ld1d {z11.d}, p5/z, [x0, x1, LSL #3]
  __ sve_st1b(z22, __ B, p6, Address(sp));          // st1b {z22.b}, p6, [sp]
  __ sve_st1b(z31, __ B, p7, Address(sp, -8));      // st1b {z31.b}, p7, [sp, #-8, MUL VL]
  __ sve_st1b(z0, __ H, p1, Address(sp));           // st1b {z0.h}, p1, [sp]
  __ sve_st1b(z0, __ S, p2, Address(sp, r8));       // st1b {z0.s}, p2, [sp, x8]
  __ sve_st1b(z0, __ D, p3, Address(sp));           // st1b {z0.d}, p3, [sp]
  __ sve_st1w(z0, __ S, p1, Address(r0, 7));        // st1w {z0.s}, p1, [x0, #7, MUL VL]
  __ sve_st1b(z0, __ B, p2, Address(sp, r1));       // st1b {z0.b}, p2, [sp, x1]
  __ sve_st1h(z0, __ H, p3, Address(sp, r8));       // st1h {z0.h}, p3, [sp, x8, LSL #1]
  __ sve_st1d(z0, __ D, p4, Address(r0, r17));      // st1d {z0.d}, p4, [x0, x17, LSL #3]
  __ sve_ldr(z0, Address(sp));                      // ldr z0, [sp]
  __ sve_ldr(z31, Address(sp, -256));               // ldr z31, [sp, #-256, MUL VL]
  __ sve_str(z8, Address(r8, 255));                 // str z8, [x8, #255, MUL VL]
  __ sve_cntb(r9);                                  // cntb x9
  __ sve_cnth(r10);                                 // cnth x10
  __ sve_cntw(r11);                                 // cntw x11
  __ sve_cntd(r12);                                 // cntd x12
  __ sve_brka(p2, p0, p2, false);                   // brka p2.b, p0/z, p2.b
  __ sve_brka(p1, p2, p3, true);                    // brka p1.b, p2/m, p3.b
  __ sve_brkb(p1, p2, p3, false);                   // brkb p1.b, p2/z, p3.b
  __ sve_brkb(p2, p3, p4, true);                    // brkb p2.b, p3/m, p4.b
  __ sve_rev(p0, __ B, p1);                         // rev p0.b, p1.b
  __ sve_rev(p1, __ H, p2);                         // rev p1.h, p2.h
  __ sve_rev(p2, __ S, p3);                         // rev p2.s, p3.s
  __ sve_rev(p3, __ D, p4);                         // rev p3.d, p4.d
  __ sve_incp(r0, __ B, p2);                        // incp x0, p2.b
  __ sve_whilelt(p0, __ B, r1, r28);                // whilelt p0.b, x1, x28
  __ sve_whilele(p2, __ H, r11, r8);                // whilele p2.h, x11, x8
  __ sve_whilelo(p3, __ S, r7, r2);                 // whilelo p3.s, x7, x2
  __ sve_whilels(p4, __ D, r17, r10);               // whilels p4.d, x17, x10
  __ sve_whileltw(p1, __ B, r1, r28);               // whilelt p1.b, w1, w28
  __ sve_whilelew(p2, __ H, r11, r8);               // whilele p2.h, w11, w8
  __ sve_whilelow(p3, __ S, r7, r2);                // whilelo p3.s, w7, w2
  __ sve_whilelsw(p4, __ D, r17, r10);              // whilels p4.d, w17, w10
  __ sve_sel(z0, __ B, p0, z1, z2);                 // sel z0.b, p0, z1.b, z2.b
  __ sve_sel(z4, __ D, p0, z5, z6);                 // sel z4.d, p0, z5.d, z6.d
  __ sve_cmp(Assembler::EQ, p1, __ B, p0, z0, z1);  // cmpeq p1.b, p0/z, z0.b, z1.b
  __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, z3);  // cmpne p1.h, p0/z, z2.h, z3.h
  __ sve_cmp(Assembler::GE, p1, __ S, p2, z4, z5);  // cmpge p1.s, p2/z, z4.s, z5.s
  __ sve_cmp(Assembler::GT, p1, __ D, p3, z6, z7);  // cmpgt p1.d, p3/z, z6.d, z7.d
  __ sve_cmp(Assembler::HI, p1, __ S, p2, z4, z5);  // cmphi p1.s, p2/z, z4.s, z5.s
  __ sve_cmp(Assembler::HS, p1, __ D, p3, z6, z7);  // cmphs p1.d, p3/z, z6.d, z7.d
  __ sve_cmp(Assembler::EQ, p1, __ B, p4, z0, 15);  // cmpeq p1.b, p4/z, z0.b, #15
  __ sve_cmp(Assembler::NE, p1, __ H, p0, z2, -16); // cmpne p1.h, p0/z, z2.h, #-16
  __ sve_cmp(Assembler::LE, p1, __ S, p1, z4, 0);   // cmple p1.s, p1/z, z4.s, #0
  __ sve_cmp(Assembler::LT, p1, __ D, p2, z6, -1);  // cmplt p1.d, p2/z, z6.d, #-1
  __ sve_cmp(Assembler::GE, p1, __ S, p3, z4, 5);   // cmpge p1.s, p3/z, z4.s, #5
  __ sve_cmp(Assembler::GT, p1, __ B, p4, z6, -2);  // cmpgt p1.b, p4/z, z6.b, #-2
  __ sve_fcm(Assembler::EQ, p1, __ S, p0, z0, z1);  // fcmeq p1.s, p0/z, z0.s, z1.s
  __ sve_fcm(Assembler::NE, p1, __ D, p0, z2, z3);  // fcmne p1.d, p0/z, z2.d, z3.d
  __ sve_fcm(Assembler::GT, p1, __ S, p2, z4, z5);  // fcmgt p1.s, p2/z, z4.s, z5.s
  __ sve_fcm(Assembler::GE, p1, __ D, p3, z6, z7);  // fcmge p1.d, p3/z, z6.d, z7.d
  __ sve_uunpkhi(z0, __ H, z1);                     // uunpkhi z0.h, z1.b
  __ sve_uunpklo(z4, __ S, z5);                     // uunpklo z4.s, z5.h
  __ sve_sunpkhi(z6, __ D, z7);                     // sunpkhi z6.d, z7.s
  __ sve_sunpklo(z10, __ H, z11);                   // sunpklo z10.h, z11.b
  __ sve_scvtf(z1, __ D, p0, z0, __ S);             // scvtf z1.d, p0/m, z0.s
  __ sve_scvtf(z3, __ D, p1, z2, __ D);             // scvtf z3.d, p1/m, z2.d
  __ sve_scvtf(z6, __ S, p2, z1, __ D);             // scvtf z6.s, p2/m, z1.d
  __ sve_scvtf(z6, __ S, p3, z1, __ S);             // scvtf z6.s, p3/m, z1.s
  __ sve_scvtf(z6, __ H, p3, z1, __ S);             // scvtf z6.h, p3/m, z1.s
  __ sve_scvtf(z6, __ H, p3, z1, __ D);             // scvtf z6.h, p3/m, z1.d
  __ sve_scvtf(z6, __ H, p3, z1, __ H);             // scvtf z6.h, p3/m, z1.h
  __ sve_fcvt(z5, __ D, p3, z4, __ S);              // fcvt z5.d, p3/m, z4.s
  __ sve_fcvt(z1, __ S, p3, z0, __ D);              // fcvt z1.s, p3/m, z0.d
  __ sve_fcvt(z5, __ S, p3, z4, __ H);              // fcvt z5.s, p3/m, z4.h
  __ sve_fcvt(z1, __ H, p3, z0, __ S);              // fcvt z1.h, p3/m, z0.s
  __ sve_fcvt(z5, __ D, p3, z4, __ H);              // fcvt z5.d, p3/m, z4.h
  __ sve_fcvt(z1, __ H, p3, z0, __ D);              // fcvt z1.h, p3/m, z0.d
  __ sve_fcvtzs(z19, __ D, p2, z1, __ D);           // fcvtzs z19.d, p2/m, z1.d
  __ sve_fcvtzs(z9, __ S, p1, z8, __ S);            // fcvtzs z9.s, p1/m, z8.s
  __ sve_fcvtzs(z1, __ S, p2, z0, __ D);            // fcvtzs z1.s, p2/m, z0.d
  __ sve_fcvtzs(z1, __ D, p3, z0, __ S);            // fcvtzs z1.d, p3/m, z0.s
  __ sve_fcvtzs(z1, __ S, p4, z18, __ H);           // fcvtzs z1.s, p4/m, z18.h
  __ sve_lasta(r0, __ B, p0, z15);                  // lasta w0, p0, z15.b
  __ sve_lastb(r1, __ B, p1, z16);                  // lastb w1, p1, z16.b
  __ sve_lasta(v0, __ B, p0, z15);                  // lasta b0, p0, z15.b
  __ sve_lastb(v1, __ B, p1, z16);                  // lastb b1, p1, z16.b
  __ sve_index(z6, __ S, 1, 1);                     // index z6.s, #1, #1
  __ sve_index(z6, __ B, r5, 2);                    // index z6.b, w5, #2
  __ sve_index(z6, __ H, r5, 3);                    // index z6.h, w5, #3
  __ sve_index(z6, __ S, r5, 4);                    // index z6.s, w5, #4
  __ sve_index(z7, __ D, r5, 5);                    // index z7.d, x5, #5
  __ sve_cpy(z7, __ H, p3, r5);                     // cpy z7.h, p3/m, w5
  __ sve_tbl(z16, __ S, z17, z18);                  // tbl z16.s, {z17.s}, z18.s
  __ sve_ld1w_gather(z15, p0, r5, z16);             // ld1w {z15.s}, p0/z, [x5, z16.s, uxtw #2]
  __ sve_ld1d_gather(z15, p0, r5, z16);             // ld1d {z15.d}, p0/z, [x5, z16.d, uxtw #3]
  __ sve_st1w_scatter(z15, p0, r5, z16);            // st1w {z15.s}, p0, [x5, z16.s, uxtw #2]
  __ sve_st1d_scatter(z15, p0, r5, z16);            // st1d {z15.d}, p0, [x5, z16.d, uxtw #3]
  __ sve_and(p0, p1, p2, p3);                       // and p0.b, p1/z, p2.b, p3.b
  __ sve_ands(p4, p5, p6, p0);                      // ands p4.b, p5/z, p6.b, p0.b
  __ sve_eor(p0, p1, p2, p3);                       // eor p0.b, p1/z, p2.b, p3.b
  __ sve_eors(p5, p6, p0, p1);                      // eors p5.b, p6/z, p0.b, p1.b
  __ sve_orr(p0, p1, p2, p3);                       // orr p0.b, p1/z, p2.b, p3.b
  __ sve_orrs(p9, p1, p4, p5);                      // orrs p9.b, p1/z, p4.b, p5.b
  __ sve_bic(p10, p7, p9, p11);                     // bic p10.b, p7/z, p9.b, p11.b
  __ sve_ptest(p7, p1);                             // ptest p7, p1.b
  __ sve_ptrue(p1, __ B);                           // ptrue p1.b
  __ sve_ptrue(p1, __ B, 0b00001);                  // ptrue p1.b, vl1
  __ sve_ptrue(p1, __ B, 0b00101);                  // ptrue p1.b, vl5
  __ sve_ptrue(p1, __ B, 0b01001);                  // ptrue p1.b, vl16
  __ sve_ptrue(p1, __ B, 0b01101);                  // ptrue p1.b, vl256
  __ sve_ptrue(p2, __ H);                           // ptrue p2.h
  __ sve_ptrue(p2, __ H, 0b00010);                  // ptrue p2.h, vl2
  __ sve_ptrue(p2, __ H, 0b00110);                  // ptrue p2.h, vl6
  __ sve_ptrue(p2, __ H, 0b01010);                  // ptrue p2.h, vl32
  __ sve_ptrue(p3, __ S);                           // ptrue p3.s
  __ sve_ptrue(p3, __ S, 0b00011);                  // ptrue p3.s, vl3
  __ sve_ptrue(p3, __ S, 0b00111);                  // ptrue p3.s, vl7
  __ sve_ptrue(p3, __ S, 0b01011);                  // ptrue p3.s, vl64
  __ sve_ptrue(p4, __ D);                           // ptrue p4.d
  __ sve_ptrue(p4, __ D, 0b00100);                  // ptrue p4.d, vl4
  __ sve_ptrue(p4, __ D, 0b01000);                  // ptrue p4.d, vl8
  __ sve_ptrue(p4, __ D, 0b01100);                  // ptrue p4.d, vl128
  __ sve_pfalse(p7);                                // pfalse p7.b
  __ sve_uzp1(p0, __ B, p0, p1);                    // uzp1 p0.b, p0.b, p1.b
  __ sve_uzp1(p0, __ H, p0, p1);                    // uzp1 p0.h, p0.h, p1.h
  __ sve_uzp1(p0, __ S, p0, p1);                    // uzp1 p0.s, p0.s, p1.s
  __ sve_uzp1(p0, __ D, p0, p1);                    // uzp1 p0.d, p0.d, p1.d
  __ sve_uzp2(p0, __ B, p0, p1);                    // uzp2 p0.b, p0.b, p1.b
  __ sve_uzp2(p0, __ H, p0, p1);                    // uzp2 p0.h, p0.h, p1.h
  __ sve_uzp2(p0, __ S, p0, p1);                    // uzp2 p0.s, p0.s, p1.s
  __ sve_uzp2(p0, __ D, p0, p1);                    // uzp2 p0.d, p0.d, p1.d
  __ sve_punpklo(p1, p0);                           // punpklo p1.h, p0.b
  __ sve_punpkhi(p1, p0);                           // punpkhi p1.h, p0.b
  __ sve_compact(z16, __ S, z16, p1);               // compact z16.s, p1, z16.s
  __ sve_compact(z16, __ D, z16, p1);               // compact z16.d, p1, z16.d
  __ sve_ext(z17, z16, 63);                         // ext z17.b, z17.b, z16.b, #63
  __ sve_fac(Assembler::GT, p1, __ H, p2, z4, z5);  // facgt p1.h, p2/z, z4.h, z5.h
  __ sve_fac(Assembler::GT, p1, __ S, p2, z4, z5);  // facgt p1.s, p2/z, z4.s, z5.s
  __ sve_fac(Assembler::GT, p1, __ D, p2, z4, z5);  // facgt p1.d, p2/z, z4.d, z5.d
  __ sve_fac(Assembler::GE, p1, __ H, p2, z4, z5);  // facge p1.h, p2/z, z4.h, z5.h
  __ sve_fac(Assembler::GE, p1, __ S, p2, z4, z5);  // facge p1.s, p2/z, z4.s, z5.s
  __ sve_fac(Assembler::GE, p1, __ D, p2, z4, z5);  // facge p1.d, p2/z, z4.d, z5.d
  __ sve_histcnt(z16, __ S, p0, z16, z16);          // histcnt z16.s, p0/z, z16.s, z16.s
  __ sve_histcnt(z17, __ D, p0, z17, z17);          // histcnt z17.d, p0/z, z17.d, z17.d

// FloatImmediateOp
  __ fmovd(v0, 2.0);        // fmov d0, #2.0
  __ fmovd(v0, 2.125);      // fmov d0, #2.125
  __ fmovd(v0, 4.0);        // fmov d0, #4.0
  __ fmovd(v0, 4.25);       // fmov d0, #4.25
  __ fmovd(v0, 8.0);        // fmov d0, #8.0
  __ fmovd(v0, 8.5);        // fmov d0, #8.5
  __ fmovd(v0, 16.0);       // fmov d0, #16.0
  __ fmovd(v0, 17.0);       // fmov d0, #17.0
  __ fmovd(v0, 0.125);      // fmov d0, #0.125
  __ fmovd(v0, 0.1328125);  // fmov d0, #0.1328125
  __ fmovd(v0, 0.25);       // fmov d0, #0.25
  __ fmovd(v0, 0.265625);   // fmov d0, #0.265625
  __ fmovd(v0, 0.5);        // fmov d0, #0.5
  __ fmovd(v0, 0.53125);    // fmov d0, #0.53125
  __ fmovd(v0, 1.0);        // fmov d0, #1.0
  __ fmovd(v0, 1.0625);     // fmov d0, #1.0625
  __ fmovd(v0, -2.0);       // fmov d0, #-2.0
  __ fmovd(v0, -2.125);     // fmov d0, #-2.125
  __ fmovd(v0, -4.0);       // fmov d0, #-4.0
  __ fmovd(v0, -4.25);      // fmov d0, #-4.25
  __ fmovd(v0, -8.0);       // fmov d0, #-8.0
  __ fmovd(v0, -8.5);       // fmov d0, #-8.5
  __ fmovd(v0, -16.0);      // fmov d0, #-16.0
  __ fmovd(v0, -17.0);      // fmov d0, #-17.0
  __ fmovd(v0, -0.125);     // fmov d0, #-0.125
  __ fmovd(v0, -0.1328125); // fmov d0, #-0.1328125
  __ fmovd(v0, -0.25);      // fmov d0, #-0.25
  __ fmovd(v0, -0.265625);  // fmov d0, #-0.265625
  __ fmovd(v0, -0.5);       // fmov d0, #-0.5
  __ fmovd(v0, -0.53125);   // fmov d0, #-0.53125
  __ fmovd(v0, -1.0);       // fmov d0, #-1.0
  __ fmovd(v0, -1.0625);    // fmov d0, #-1.0625

// LSEOp
  __ swp(Assembler::xword, r25, r5, r1);      // swp x25, x5, [x1]
  __ ldadd(Assembler::xword, r23, r16, sp);   // ldadd x23, x16, [sp]
  __ ldbic(Assembler::xword, r5, r12, r9);    // ldclr x5, x12, [x9]
  __ ldeor(Assembler::xword, r28, r15, r29);  // ldeor x28, x15, [x29]
  __ ldorr(Assembler::xword, r22, zr, r19);   // ldset x22, xzr, [x19]
  __ ldsmin(Assembler::xword, zr, r5, r14);   // ldsmin xzr, x5, [x14]
  __ ldsmax(Assembler::xword, r16, zr, r15);  // ldsmax x16, xzr, [x15]
  __ ldumin(Assembler::xword, r27, r20, r16); // ldumin x27, x20, [x16]
  __ ldumax(Assembler::xword, r12, r11, r9);  // ldumax x12, x11, [x9]

// LSEOp
  __ swpa(Assembler::xword, r6, r30, r17);     // swpa x6, x30, [x17]
  __ ldadda(Assembler::xword, r27, r28, r30);  // ldadda x27, x28, [x30]
  __ ldbica(Assembler::xword, r7, r10, r20);   // ldclra x7, x10, [x20]
  __ ldeora(Assembler::xword, r10, r4, r24);   // ldeora x10, x4, [x24]
  __ ldorra(Assembler::xword, r17, r17, r22);  // ldseta x17, x17, [x22]
  __ ldsmina(Assembler::xword, r3, r29, r15);  // ldsmina x3, x29, [x15]
  __ ldsmaxa(Assembler::xword, r22, r19, r19); // ldsmaxa x22, x19, [x19]
  __ ldumina(Assembler::xword, r22, r2, r15);  // ldumina x22, x2, [x15]
  __ ldumaxa(Assembler::xword, r6, r12, r16);  // ldumaxa x6, x12, [x16]

// LSEOp
  __ swpal(Assembler::xword, r11, r13, r23);    // swpal x11, x13, [x23]
  __ ldaddal(Assembler::xword, r1, r30, r19);   // ldaddal x1, x30, [x19]
  __ ldbical(Assembler::xword, r5, r17, r2);    // ldclral x5, x17, [x2]
  __ ldeoral(Assembler::xword, r16, r22, r13);  // ldeoral x16, x22, [x13]
  __ ldorral(Assembler::xword, r10, r21, r29);  // ldsetal x10, x21, [x29]
  __ ldsminal(Assembler::xword, r27, r12, r27); // ldsminal x27, x12, [x27]
  __ ldsmaxal(Assembler::xword, r3, r1, sp);    // ldsmaxal x3, x1, [sp]
  __ lduminal(Assembler::xword, r24, r19, r17); // lduminal x24, x19, [x17]
  __ ldumaxal(Assembler::xword, r9, r28, r27);  // ldumaxal x9, x28, [x27]

// LSEOp
  __ swpl(Assembler::xword, r15, r7, r21);     // swpl x15, x7, [x21]
  __ ldaddl(Assembler::xword, r23, zr, r25);   // ldaddl x23, xzr, [x25]
  __ ldbicl(Assembler::xword, r2, zr, r27);    // ldclrl x2, xzr, [x27]
  __ ldeorl(Assembler::xword, r16, r10, r23);  // ldeorl x16, x10, [x23]
  __ ldorrl(Assembler::xword, r19, r3, r15);   // ldsetl x19, x3, [x15]
  __ ldsminl(Assembler::xword, r0, r25, r26);  // ldsminl x0, x25, [x26]
  __ ldsmaxl(Assembler::xword, r23, r2, r15);  // ldsmaxl x23, x2, [x15]
  __ lduminl(Assembler::xword, r12, r4, r28);  // lduminl x12, x4, [x28]
  __ ldumaxl(Assembler::xword, r30, r29, r16); // ldumaxl x30, x29, [x16]

// LSEOp
  __ swp(Assembler::word, r27, r6, r9);      // swp w27, w6, [x9]
  __ ldadd(Assembler::word, r29, r16, r7);   // ldadd w29, w16, [x7]
  __ ldbic(Assembler::word, r4, r7, r15);    // ldclr w4, w7, [x15]
  __ ldeor(Assembler::word, r9, r23, r8);    // ldeor w9, w23, [x8]
  __ ldorr(Assembler::word, r2, r28, r21);   // ldset w2, w28, [x21]
  __ ldsmin(Assembler::word, zr, r5, r27);   // ldsmin wzr, w5, [x27]
  __ ldsmax(Assembler::word, r0, r17, r15);  // ldsmax w0, w17, [x15]
  __ ldumin(Assembler::word, r4, r26, r8);   // ldumin w4, w26, [x8]
  __ ldumax(Assembler::word, r28, r22, r27); // ldumax w28, w22, [x27]

// LSEOp
  __ swpa(Assembler::word, r27, r25, r23);    // swpa w27, w25, [x23]
  __ ldadda(Assembler::word, r0, r4, r6);     // ldadda w0, w4, [x6]
  __ ldbica(Assembler::word, r16, r0, r4);    // ldclra w16, w0, [x4]
  __ ldeora(Assembler::word, r15, r1, r10);   // ldeora w15, w1, [x10]
  __ ldorra(Assembler::word, r7, r5, r10);    // ldseta w7, w5, [x10]
  __ ldsmina(Assembler::word, r28, r7, r20);  // ldsmina w28, w7, [x20]
  __ ldsmaxa(Assembler::word, r23, r21, r6);  // ldsmaxa w23, w21, [x6]
  __ ldumina(Assembler::word, r11, r8, r17);  // ldumina w11, w8, [x17]
  __ ldumaxa(Assembler::word, zr, r6, r17);   // ldumaxa wzr, w6, [x17]

// LSEOp
  __ swpal(Assembler::word, r2, r12, r30);     // swpal w2, w12, [x30]
  __ ldaddal(Assembler::word, r29, r3, r27);   // ldaddal w29, w3, [x27]
  __ ldbical(Assembler::word, r22, r29, r14);  // ldclral w22, w29, [x14]
  __ ldeoral(Assembler::word, r13, r28, r17);  // ldeoral w13, w28, [x17]
  __ ldorral(Assembler::word, r24, r5, r2);    // ldsetal w24, w5, [x2]
  __ ldsminal(Assembler::word, r14, r10, r16); // ldsminal w14, w10, [x16]
  __ ldsmaxal(Assembler::word, r11, r27, r23); // ldsmaxal w11, w27, [x23]
  __ lduminal(Assembler::word, r12, r4, r22);  // lduminal w12, w4, [x22]
  __ ldumaxal(Assembler::word, r17, r4, r1);   // ldumaxal w17, w4, [x1]

// LSEOp
  __ swpl(Assembler::word, r19, r16, r15);    // swpl w19, w16, [x15]
  __ ldaddl(Assembler::word, r13, r14, r12);  // ldaddl w13, w14, [x12]
  __ ldbicl(Assembler::word, r2, r17, r3);    // ldclrl w2, w17, [x3]
  __ ldeorl(Assembler::word, r21, r23, r5);   // ldeorl w21, w23, [x5]
  __ ldorrl(Assembler::word, r6, r7, r19);    // ldsetl w6, w7, [x19]
  __ ldsminl(Assembler::word, r13, r28, r17); // ldsminl w13, w28, [x17]
  __ ldsmaxl(Assembler::word, r16, r6, r2);   // ldsmaxl w16, w6, [x2]
  __ lduminl(Assembler::word, r29, r3, r4);   // lduminl w29, w3, [x4]
  __ ldumaxl(Assembler::word, r6, r16, r20);  // ldumaxl w6, w16, [x20]

// SHA3SIMDOp
  __ bcax(v13, __ T16B, v12, v19, v8);  // bcax v13.16B, v12.16B, v19.16B, v8.16B
  __ eor3(v24, __ T16B, v19, v17, v0);  // eor3 v24.16B, v19.16B, v17.16B, v0.16B
  __ rax1(v10, __ T2D, v23, v6);        // rax1 v10.2D, v23.2D, v6.2D
  __ xar(v19, __ T2D, v30, v13, 33);    // xar v19.2D, v30.2D, v13.2D, #33

// SHA512SIMDOp
  __ sha512h(v6, __ T2D, v0, v7);       // sha512h q6, q0, v7.2D
  __ sha512h2(v14, __ T2D, v17, v25);   // sha512h2 q14, q17, v25.2D
  __ sha512su0(v8, __ T2D, v10);        // sha512su0 v8.2D, v10.2D
  __ sha512su1(v22, __ T2D, v20, v22);  // sha512su1 v22.2D, v20.2D, v22.2D

// SVEBinaryImmOp
  __ sve_add(z27, __ B, 31u);    // add z27.b, z27.b, #0x1f
  __ sve_sub(z15, __ S, 167u);   // sub z15.s, z15.s, #0xa7
  __ sve_and(z7, __ B, 62u);     // and z7.b, z7.b, #0x3e
  __ sve_eor(z0, __ H, 51199u);  // eor z0.h, z0.h, #0xc7ff
  __ sve_orr(z22, __ B, 96u);    // orr z22.b, z22.b, #0x60

// SVEBinaryImmOp
  __ sve_add(z22, __ H, 207u);   // add z22.h, z22.h, #0xcf
  __ sve_sub(z5, __ D, 133u);    // sub z5.d, z5.d, #0x85
  __ sve_and(z13, __ S, 496u);   // and z13.s, z13.s, #0x1f0
  __ sve_eor(z13, __ H, 33279u); // eor z13.h, z13.h, #0x81ff
  __ sve_orr(z25, __ H, 508u);   // orr z25.h, z25.h, #0x1fc

// SVEBinaryImmOp
  __ sve_add(z17, __ H, 54u);               // add z17.h, z17.h, #0x36
  __ sve_sub(z29, __ B, 234u);              // sub z29.b, z29.b, #0xea
  __ sve_and(z16, __ D, 4503599627354112u); // and z16.d, z16.d, #0xfffffffffc000
  __ sve_eor(z14, __ B, 254u);              // eor z14.b, z14.b, #0xfe
  __ sve_orr(z3, __ B, 243u);               // orr z3.b, z3.b, #0xf3

// SVEBinaryImmOp
  __ sve_add(z21, __ S, 119u);        // add z21.s, z21.s, #0x77
  __ sve_sub(z8, __ S, 179u);         // sub z8.s, z8.s, #0xb3
  __ sve_and(z24, __ B, 191u);        // and z24.b, z24.b, #0xbf
  __ sve_eor(z17, __ S, 4294966791u); // eor z17.s, z17.s, #0xfffffe07
  __ sve_orr(z20, __ S, 491520u);     // orr z20.s, z20.s, #0x78000

// SVEBinaryImmOp
  __ sve_add(z17, __ D, 36u);         // add z17.d, z17.d, #0x24
  __ sve_sub(z19, __ B, 195u);        // sub z19.b, z19.b, #0xc3
  __ sve_and(z25, __ S, 33553408u);   // and z25.s, z25.s, #0x1fffc00
  __ sve_eor(z8, __ H, 49663u);       // eor z8.h, z8.h, #0xc1ff
  __ sve_orr(z30, __ S, 4294967231u); // orr z30.s, z30.s, #0xffffffbf

// SVEBinaryImmOp
  __ sve_add(z1, __ H, 163u);           // add z1.h, z1.h, #0xa3
  __ sve_sub(z12, __ B, 75u);           // sub z12.b, z12.b, #0x4b
  __ sve_and(z7, __ D, 274877904896u);  // and z7.d, z7.d, #0x3ffffff800
  __ sve_eor(z27, __ B, 243u);          // eor z27.b, z27.b, #0xf3
  __ sve_orr(z23, __ H, 65534u);        // orr z23.h, z23.h, #0xfffe

// SVEVectorOp
  __ sve_add(z22, __ D, z22, z20);  // add z22.d, z22.d, z20.d
  __ sve_sub(z28, __ S, z9, z13);   // sub z28.s, z9.s, z13.s
  __ sve_fadd(z7, __ S, z20, z28);  // fadd z7.s, z20.s, z28.s
  __ sve_fmul(z11, __ D, z13, z11); // fmul z11.d, z13.d, z11.d
  __ sve_fsub(z1, __ D, z24, z8);   // fsub z1.d, z24.d, z8.d
  __ sve_abs(z13, __ S, p4, z17);   // abs z13.s, p4/m, z17.s
  __ sve_add(z4, __ H, p0, z3);     // add z4.h, p0/m, z4.h, z3.h
  __ sve_and(z7, __ S, p3, z14);    // and z7.s, p3/m, z7.s, z14.s
  __ sve_asr(z4, __ B, p3, z29);    // asr z4.b, p3/m, z4.b, z29.b
  __ sve_bic(z0, __ D, p2, z21);    // bic z0.d, p2/m, z0.d, z21.d
  __ sve_clz(z3, __ S, p0, z9);     // clz z3.s, p0/m, z9.s
  __ sve_cnt(z28, __ B, p2, z24);   // cnt z28.b, p2/m, z24.b
  __ sve_eor(z19, __ D, p1, z23);   // eor z19.d, p1/m, z19.d, z23.d
  __ sve_lsl(z13, __ D, p5, z10);   // lsl z13.d, p5/m, z13.d, z10.d
  __ sve_lsr(z12, __ S, p4, z30);   // lsr z12.s, p4/m, z12.s, z30.s
  __ sve_mul(z14, __ S, p0, z29);   // mul z14.s, p0/m, z14.s, z29.s
  __ sve_neg(z21, __ S, p5, z7);    // neg z21.s, p5/m, z7.s
  __ sve_not(z2, __ S, p0, z26);    // not z2.s, p0/m, z26.s
    __ sve_orr(z9, __ S, p4, z17);            // orr z9.s, p4/m, z9.s, z17.s
    __ sve_rbit(z0, __ D, p1, z2);            // rbit z0.d, p1/m, z2.d
    __ sve_revb(z14, __ D, p1, z11);          // revb z14.d, p1/m, z11.d
    __ sve_smax(z14, __ H, p4, z29);          // smax z14.h, p4/m, z14.h, z29.h
    __ sve_smin(z3, __ H, p0, z22);           // smin z3.h, p0/m, z3.h, z22.h
    __ sve_sub(z3, __ B, p6, z27);            // sub z3.b, p6/m, z3.b, z27.b
    __ sve_fabs(z19, __ D, p5, z7);           // fabs z19.d, p5/m, z7.d
    __ sve_fadd(z21, __ S, p3, z5);           // fadd z21.s, p3/m, z21.s, z5.s
    __ sve_fdiv(z25, __ D, p1, z21);          // fdiv z25.d, p1/m, z25.d, z21.d
    __ sve_fmax(z17, __ S, p0, z3);           // fmax z17.s, p0/m, z17.s, z3.s
    __ sve_fmin(z19, __ S, p3, z7);           // fmin z19.s, p3/m, z19.s, z7.s
    __ sve_fmul(z14, __ S, p4, z17);          // fmul z14.s, p4/m, z14.s, z17.s
    __ sve_fneg(z13, __ D, p6, z17);          // fneg z13.d, p6/m, z17.d
    __ sve_frintm(z17, __ S, p2, z15);        // frintm z17.s, p2/m, z15.s
    __ sve_frintn(z26, __ D, p5, z27);        // frintn z26.d, p5/m, z27.d
    __ sve_frintp(z7, __ D, p2, z5);          // frintp z7.d, p2/m, z5.d
    __ sve_fsqrt(z27, __ S, p2, z0);          // fsqrt z27.s, p2/m, z0.s
    __ sve_fsub(z24, __ S, p5, z20);          // fsub z24.s, p5/m, z24.s, z20.s
    __ sve_fmad(z3, __ D, p5, z25, z5);       // fmad z3.d, p5/m, z25.d, z5.d
    __ sve_fmla(z29, __ S, p4, z17, z1);      // fmla z29.s, p4/m, z17.s, z1.s
    __ sve_fmls(z14, __ D, p7, z13, z0);      // fmls z14.d, p7/m, z13.d, z0.d
    __ sve_fmsb(z2, __ D, p7, z20, z22);      // fmsb z2.d, p7/m, z20.d, z22.d
    __ sve_fnmad(z29, __ S, p3, z8, z2);      // fnmad z29.s, p3/m, z8.s, z2.s
    __ sve_fnmsb(z14, __ D, p5, z22, z0);     // fnmsb z14.d, p5/m, z22.d, z0.d
    __ sve_fnmla(z25, __ D, p6, z23, z12);    // fnmla z25.d, p6/m, z23.d, z12.d
    __ sve_fnmls(z21, __ D, p0, z1, z10);     // fnmls z21.d, p0/m, z1.d, z10.d
    __ sve_mla(z11, __ H, p5, z23, z23);      // mla z11.h, p5/m, z23.h, z23.h
    __ sve_mls(z30, __ S, p4, z19, z19);      // mls z30.s, p4/m, z19.s, z19.s
    __ sve_and(z4, z20, z13);                 // and z4.d, z20.d, z13.d
    __ sve_eor(z22, z30, z30);                // eor z22.d, z30.d, z30.d
    __ sve_orr(z17, z17, z14);                // orr z17.d, z17.d, z14.d
    __ sve_bic(z12, z28, z20);                // bic z12.d, z28.d, z20.d
    __ sve_uzp1(z1, __ B, z13, z13);          // uzp1 z1.b, z13.b, z13.b
    __ sve_uzp2(z7, __ S, z10, z11);          // uzp2 z7.s, z10.s, z11.s
    __ sve_fabd(z4, __ S, p6, z15);           // fabd z4.s, p6/m, z4.s, z15.s
    __ sve_bext(z3, __ S, z29, z0);           // bext z3.s, z29.s, z0.s
    __ sve_bdep(z5, __ D, z20, z30);          // bdep z5.d, z20.d, z30.d
    __ sve_eor3(z13, z13, z8);                // eor3 z13.d, z13.d, z13.d, z8.d

// SVEReductionOp
    __ sve_andv(v29, __ D, p0, z14);          // andv d29, p0, z14.d
    __ sve_orv(v3, __ H, p0, z25);            // orv h3, p0, z25.h
    __ sve_eorv(v24, __ D, p2, z1);           // eorv d24, p2, z1.d
    __ sve_smaxv(v10, __ S, p3, z1);          // smaxv s10, p3, z1.s
    __ sve_sminv(v25, __ S, p1, z28);         // sminv s25, p1, z28.s
    __ sve_fminv(v16, __ S, p1, z27);         // fminv s16, p1, z27.s
    __ sve_fmaxv(v1, __ S, p7, z11);          // fmaxv s1, p7, z11.s
    __ sve_fadda(v1, __ D, p0, z1);           // fadda d1, p0, d1, z1.d
    __ sve_uaddv(v26, __ B, p3, z2);          // uaddv d26, p3, z2.b

    __ bind(forth);

/* */

  static const unsigned int insns[] = {
    0x8b0d82fa, 0xcb49970c, 0xab889dfc, 0xeb9ee787, 0x0b9b3ec9, 0x4b9179a3,
    0x2b88474e, 0x6b8c56c0, 0x8a1a51e0, 0xaa11f4ba, 0xca0281b8, 0xea918c7c,
    0x0a5d4a19, 0x2a4b262d, 0x4a513ca5, 0x6a9b6ae2, 0x8a70b79b, 0xaaba9728,
    0xca6dfe3d, 0xea627f1c, 0x0aa70f53, 0x2aaa0f06, 0x4a6176a4, 0x6a604eb0,
    0x1105ed91, 0x3100583e, 0x5101f8bd, 0x710f0306, 0x9101a1a0, 0xb10a5cc8,
    0xd10810aa, 0xf10fd061, 0x120cb166, 0x321764bc, 0x52174681, 0x720c0227,
    0x9241018e, 0xb25a2969, 0xd278b411, 0xf26aad01, 0x14000000, 0x17ffffd7,
    0x1400041e, 0x94000000, 0x97ffffd4, 0x9400041b,
    0x3400000a, 0x34fffa2a, 0x3400830a, 0x35000008, 0x35fff9c8, 0x350082a8,
    0xb400000b, 0xb4fff96b, 0xb400824b, 0xb500001d, 0xb5fff91d, 0xb50081fd,
    0x10000013, 0x10fff8b3, 0x10008193, 0x90000013, 0x36300016, 0x3637f836,
    0x36308116, 0x3758000c, 0x375ff7cc, 0x375880ac, 0x128313a0, 0x528a32c7,
    0x7289173b, 0x92ab3acc, 0xd2a0bf94, 0xf2c285e8, 0x9358722f, 0x330e652f,
    0x53067f3b, 0x93577c53, 0xb34a1aac, 0xd35a4016, 0x13946c63, 0x93c3dbc8,
    0x54000000, 0x54fff5a0, 0x54007e80, 0x54000001, 0x54fff541, 0x54007e21,
    0x54000002, 0x54fff4e2, 0x54007dc2, 0x54000002, 0x54fff482, 0x54007d62,
    0x54000003, 0x54fff423, 0x54007d03, 0x54000003, 0x54fff3c3, 0x54007ca3,
    0x54000004, 0x54fff364, 0x54007c44, 0x54000005, 0x54fff305, 0x54007be5,
    0x54000006, 0x54fff2a6, 0x54007b86, 0x54000007, 0x54fff247, 0x54007b27,
    0x54000008, 0x54fff1e8, 0x54007ac8, 0x54000009, 0x54fff189, 0x54007a69,
    0x5400000a, 0x54fff12a, 0x54007a0a, 0x5400000b, 0x54fff0cb, 0x540079ab,
    0x5400000c, 0x54fff06c, 0x5400794c, 0x5400000d, 0x54fff00d, 0x540078ed,
    0x5400000e, 0x54ffefae, 0x5400788e, 0x5400000f, 0x54ffef4f, 0x5400782f,
    0xd40658e1, 0xd4014d22, 0xd4046543, 0xd4273f60, 0xd44cad80, 0xd503201f,
    0xd503203f, 0xd503205f, 0xd503209f, 0xd50320bf, 0xd503219f, 0xd50323bf,
    0xd503239f, 0xd50321df, 0xd50323ff, 0xd50323df, 0xd503211f, 0xd503233f,
    0xd503231f, 0xd503215f, 0xd503237f, 0xd503235f, 0xd69f03e0, 0xd6bf03e0,
    0xd5033fdf, 0xd503207f, 0xd50320ff, 0xd5033e9f, 0xd50332bf, 0xd61f0200,
    0xd63f0280, 0xdac123ea, 0xdac127fb, 0xdac12be8, 0xdac12fe0, 0xdac133e1,
    0xdac137f5, 0xdac13bf1, 0xdac13ffd, 0xdac147fd, 0xd61f0b9f, 0xd61f0c3f,
    0xd63f0aff, 0xd63f0ebf, 0xd51b4434, 0xd51b4216, 0xd53b443b, 0xd53b4213,
    0xd53b00eb, 0xd53b0030, 0xdac143e6, 0xc8117c80, 0xc80afed8, 0xc85f7e6a,
    0xc85ffca1, 0xc89ffd1e, 0xc8dffe2c, 0x88097cee, 0x8801fe05, 0x885f7d82,
    0x885ffd8a, 0x889fff83, 0x88dfff4e, 0x481e7dca, 0x4815fd2d, 0x485f7f76,
    0x485ffe7c, 0x489fffcb, 0x48dffc53, 0x08027c37, 0x0800fe0c, 0x085f7ded,
    0x085ffeb1, 0x089ffd6d, 0x08dffd1e, 0xc87f3578, 0xc87feaa1, 0xc83b506d,
    0xc82c87a6, 0x887f1166, 0x887f93d0, 0x883e32a4, 0x883bf12f, 0xf80011f9,
    0xb81b1022, 0x381ea354, 0x79002fd7, 0xf85cf39a, 0xb8580309, 0x385e218c,
    0x784051e1, 0x389e11d8, 0x789fa1f8, 0x79c01865, 0xb881131b, 0xfc5dd3ad,
    0xbc5d1136, 0xfc00900b, 0xbc181014, 0xf818ec7d, 0xb81b8c91, 0x381efc40,
    0x78007c3d, 0xf857beb0, 0xb8413dd4, 0x385fddd6, 0x78409e2f, 0x389eddea,
    0x789e7d94, 0x78de3d55, 0xb8805c13, 0xfc5cadc0, 0xbc428c23, 0xfc1a2dc4,
    0xbc1caf91, 0xf81475f6, 0xb81f95d1, 0x381e757e, 0x78014561, 0xf8402436,
    0xb85896e2, 0x385f4763, 0x785db4f0, 0x3880374f, 0x789e25e7, 0x78dd0563,
    0xb88166f9, 0xfc529540, 0xbc4374d1, 0xfc1166ad, 0xbc1ba6c0, 0xf820ea7b,
    0xb82d68c8, 0x38367a04, 0x782f4b59, 0xf878c8a4, 0xb8674a24, 0x386b78f1,
    0x78776bc0, 0x38a15aca, 0x78bedbd5, 0x78fcd94b, 0xb8aa4a7c, 0xfc6ecbbd,
    0xbc65d8a8, 0xfc2de918, 0xbc3a7b11, 0xf91f1193, 0xb91ed5f7, 0x391ec9bd,
    0x79182ceb, 0xf95d4b0a, 0xb9581010, 0x395fc034, 0x795fb221, 0x399d8731,
    0x799efb3b, 0x79dd1a2e, 0xb998e4ea, 0xfd583723, 0xbd5ea12c, 0xfd18dc37,
    0xbd1b0e83, 0x58ffdaa2, 0x1800001d, 0xf885d1c0, 0xd8ffda40, 0xf8a77820,
    0xf9980220, 0x1a030301, 0x3a140311, 0x5a0d000b, 0x7a07015c, 0x9a1001e4,
    0xba140182, 0xda0d01bd, 0xfa0c00ce, 0x0b31f194, 0x2b206d7b, 0xcb29f027,
    0x6b210f63, 0x8b2cb34d, 0xab2a88b1, 0xcb2f511e, 0xeb3332f3, 0x3a4533aa,
    0x7a4d312b, 0xba442146, 0xfa42818c, 0x3a466a02, 0x7a4b68ed, 0xba4a9b6b,
    0xfa4dd86d, 0x1a8a637a, 0x1a9cd6aa, 0x5a9bd137, 0x5a8fd7aa, 0x9a95233e,
    0x9a95c620, 0xda9422b0, 0xda8397d3, 0x5ac00173, 0x5ac00418, 0x5ac00b3b,
    0x5ac0106e, 0x5ac0162e,
    0xdac001e7, 0xdac00798, 0xdac00b31, 0xdac00f42, 0xdac010bc, 0xdac01759,
    0xdac1021b, 0xdac104d1, 0xdac10995, 0xdac10c80, 0xdac1136c, 0xdac11791,
    0xdac1185c, 0xdac11d51, 0xd71f09ee, 0xd71f0dc3, 0xd73f0b2f, 0xd73f0e6e,
    0x1ac40a05, 0x1ac40f3a, 0x1acc2042, 0x1ac8263d, 0x1ac42867, 0x1ada2c99,
    0x9ad10899, 0x9ad10f40, 0x9ad521f7, 0x9adb263c, 0x9ac0286a, 0x9ac92f27,
    0x9bdd7de6, 0x9b427d4f, 0x1b0b2cf1, 0x1b1ddcf7, 0x9b0b2f6e, 0x9b0cbf04,
    0x9b2b728e, 0x9b2cdd6d, 0x9bae275e, 0x9ba7954d, 0x7ea3d5fd, 0x1e2f098b,
    0x1e311bde, 0x1e2f2a93, 0x1e35392f, 0x7efbd522, 0x1e7e0ba7, 0x1e621831,
    0x1e632946, 0x1e673978, 0x1f000d61, 0x1f06db91, 0x1f3b6806, 0x1f2770a2,
    0x1f4d2f2b, 0x1f48c677, 0x1f744f35, 0x1f7d5851, 0x1e2042a8, 0x1e20c293,
    0x1e21422b, 0x1e21c0d4, 0x1e22c06f, 0x1e23c383, 0x1ee24363, 0x1e6041ce,
    0x1e60c18a, 0x1e61422b, 0x1e61c32a, 0x1e6240e7, 0x1e38038e, 0x9e3802c0,
    0x1e780180, 0x9e7801b7, 0x1e2200ed, 0x9e2200ee, 0x1e620288, 0x9e620391,
    0x1e24021e, 0x9e640122, 0x1e300290, 0x9e70009d, 0x1e260341, 0x9e6602f8,
    0x1e2702ae, 0x9e6700ac, 0x1e382180, 0x1e7d2300, 0x1e202368, 0x1e6022a8,
    0x293a1796, 0x29426e73, 0x697c68fc, 0xa93d0486, 0xa97b5eba, 0x29b47934,
    0x29c2534d, 0x69f62dbd, 0xa9bd54bb, 0xa9c503c6, 0x28a63e13, 0x28e25d2c,
    0x68c469e0, 0xa8b34748, 0xa8f51c59, 0x28264433, 0x285036c0, 0xa8005f7d,
    0xa872290b, 0x0c407160, 0x4cdfa350, 0x0cd16f55, 0x4cdf27ba, 0x0d40c0d5,
    0x4ddfcbad, 0x0dd0cd95, 0x4c408c01, 0x0cdf86a9, 0x4d60c327, 0x0dffc928,
    0x4deecd89, 0x4cd14887, 0x0c404a37, 0x4d40e6c3, 0x4ddfe84c, 0x0dcced4f,
    0x4cdf0444, 0x0ccb0286, 0x0d60e18b, 0x0dffe62f, 0x0df0eb2e, 0x0e31bab4,
    0x4e31b841, 0x0e71baf6, 0x4e71bbfe, 0x4eb1b9ee, 0x0e30a862, 0x4e30a8e6,
    0x0e70a883, 0x4e70a907, 0x4eb0ab38, 0x6e30f820, 0x0e31ab9b, 0x2e31abdd,
    0x4e31a8c5, 0x6e31a8c5, 0x0e71abdd, 0x2e71a98b, 0x4e71ab59, 0x6e71a820,
    0x4eb1abfe, 0x6eb1a820, 0x6eb0fa51, 0x7e30fbbc, 0x7e70fb59, 0x7eb0f949,
    0x7ef0fb59, 0x0ea0c9ac, 0x4ea0ca0f, 0x4ee0c98b, 0x2ea0c96a, 0x6ea0ca51,
    0x6ee0cb38, 0x0ea0dad5, 0x4ea0db17, 0x4ee0d820, 0x0ea0ea30, 0x4ea0e96a,
    0x4ee0e8e6, 0x2ea0dbbc, 0x6ea0d8e6, 0x6ee0d8c5, 0x0e20b8c5, 0x4e20bab4,
    0x0e60ba51, 0x4e60ba0f, 0x0ea0ba51, 0x4ea0bbdd, 0x4ee0bb7a, 0x0ea0fbbc,
    0x4ea0f841, 0x4ee0fb9b, 0x2ea0f820, 0x6ea0fab4, 0x6ee0fbbc, 0x2ea1fa0f,
    0x6ea1f9ac, 0x6ee1f96a, 0x2e205bbc, 0x6e205bbc, 0x0e351e93, 0x4e381ef6,
    0x0eac1d6a, 0x4ea61ca4, 0x2e201ffe, 0x6e361eb4, 0x0e2a8528, 0x4e2087fe,
    0x0e738651, 0x4e6c856a, 0x0ebd879b, 0x4ea48462, 0x4efa8738, 0x0e26d4a4,
    0x4e25d483, 0x4e6ad528, 0x2e3886f6, 0x6e338651, 0x2e6f85cd, 0x6e6684a4,
    0x2ebe87bc, 0x6eb98717, 0x6ef786d5, 0x0ebbd759, 0x4ebad738, 0x4ee5d483,
    0x0e399f17, 0x4e3c9f7a, 0x0e799f17, 0x4e709dee, 0x0eb79ed5, 0x4ea59c83,
    0x2eb9d717, 0x6eaad528, 0x6efad738, 0x2e35d693, 0x6e31d60f, 0x6e72d630,
    0x2e24dc62, 0x6e23dc41, 0x6e62dc20, 0x0e7a9738, 0x4e6694a4, 0x0ea59483,
    0x4ead958b, 0x0e20cffe, 0x4e3dcf9b, 0x4e6bcd49, 0x2e7b9759, 0x6e649462,
    0x2eae95ac, 0x6eb39651, 0x0ea0cffe, 0x4ea3cc41, 0x4eeecdac, 0x2e3effbc,
    0x6e22fc20, 0x6e73fe51, 0x0e2e65ac, 0x4e336651, 0x0e7766d5, 0x4e6e65ac,
    0x0ebd679b, 0x4ebf67dd, 0x0e20a7fe, 0x4e23a441, 0x0e7ba759, 0x4e7da79b,
    0x0ea6a4a4, 0x4ebfa7dd, 0x0e25f483, 0x4e28f4e6, 0x4e7ff7dd, 0x0e3b6f59,
    0x4e336e51, 0x0e6a6d28, 0x4e696d07, 0x0eae6dac, 0x4ea26c20, 0x0e35ae93,
    0x4e23ac41, 0x0e79af17, 0x4e64ac62, 0x0ea2ac20, 0x4eaaad28, 0x0eb9f717,
    0x4ebbf759, 0x4ef1f60f, 0x2e3f8fdd, 0x6e258c83, 0x2e6c8d6a, 0x6e788ef6,
    0x2eac8d6a, 0x6ea68ca4, 0x6ef38e51, 0x0e23e441, 0x4e2de58b, 0x4e69e507,
    0x0e2c356a, 0x4e31360f, 0x0e723630, 0x4e643462, 0x0eab3549, 0x4ead358b,
    0x4eee35ac, 0x2e3035ee,
    0x6e2f35cd, 0x2e643462, 0x6e6834e6, 0x2eb53693, 0x6ebb3759, 0x6ef1360f,
    0x2e263ca4, 0x6e243c62, 0x2e663ca4, 0x6e6d3d8b, 0x2eb33e51, 0x6eb63eb4,
    0x6ef23e30, 0x2eb3e651, 0x6eace56a, 0x6ef6e6b4, 0x0e383ef6, 0x4e2e3dac,
    0x0e7b3f59, 0x4e793f17, 0x0ebe3fbc, 0x4eb03dee, 0x4eec3d6a, 0x2e3ae738,
    0x6e23e441, 0x6e6de58b, 0x2ea0effe, 0x6eaced6a, 0x6ef1ee0f, 0x65922c43,
    0x65d02219, 0x65d02560, 0x65d13dc4, 0x65913690, 0x65d33b6b, 0xba5fd3e3,
    0x3a5f03e5, 0xfa411be4, 0x7a42cbe2, 0x93df03ff, 0xc820ffff, 0x8822fc7f,
    0xc8247cbf, 0x88267fff, 0x4e010fe0, 0x5e040420, 0x4e081fe1, 0x4e0c1fe1,
    0x4e0a1fe1, 0x4e071fe1, 0x4e042c20, 0x4e062c20, 0x4e052c20, 0x4e083c20,
    0x0e0c3c20, 0x0e0a3c20, 0x0e073c20, 0x9eae0020, 0x0f03f409, 0x6f03f40e,
    0x4cc0ac3f, 0x0ea1b820, 0x4e21c862, 0x4e61b8a4, 0x05a08020, 0x05104fe0,
    0x05505001, 0x05906fe2, 0x05d03005, 0x05101fea, 0x05901feb, 0x04b0e3e0,
    0x0470e7e1, 0x042f9c20, 0x043f9c35, 0x047f9c20, 0x04ff9c20, 0x04299420,
    0x04319160, 0x0461943e, 0x04a19020, 0x04038100, 0x040381a0, 0x040387e1,
    0x04438be2, 0x04c38fe3, 0x040181e0, 0x04018100, 0x04018621, 0x04418b22,
    0x04418822, 0x04818c23, 0x040081e0, 0x04008120, 0x04008761, 0x04008621,
    0x04408822, 0x04808c23, 0x042053ff, 0x047f5401, 0x25208028, 0x2538cfe0,
    0x2578d001, 0x25b8efe2, 0x25f8f007, 0x2538dfea, 0x25b8dfeb, 0xa400a3e0,
    0xa420a7e0, 0xa4484be0, 0xa467afe0, 0xa4a8a7ea, 0xa547a814, 0xa4084ffe,
    0xa55c53e0, 0xa5e1540b, 0xe400fbf6, 0xe408ffff, 0xe420e7e0, 0xe4484be0,
    0xe460efe0, 0xe547e400, 0xe4014be0, 0xe4a84fe0, 0xe5f15000, 0x858043e0,
    0x85a043ff, 0xe59f5d08, 0x0420e3e9, 0x0460e3ea, 0x04a0e3eb, 0x04e0e3ec,
    0x25104042, 0x25104871, 0x25904861, 0x25904c92, 0x05344020, 0x05744041,
    0x05b44062, 0x05f44083, 0x252c8840, 0x253c1420, 0x25681572, 0x25a21ce3,
    0x25ea1e34, 0x253c0421, 0x25680572, 0x25a20ce3, 0x25ea0e34, 0x0522c020,
    0x05e6c0a4, 0x2401a001, 0x2443a051, 0x24858881, 0x24c78cd1, 0x24850891,
    0x24c70cc1, 0x250f9001, 0x25508051, 0x25802491, 0x25df28c1, 0x25850c81,
    0x251e10d1, 0x65816001, 0x65c36051, 0x65854891, 0x65c74cc1, 0x05733820,
    0x05b238a4, 0x05f138e6, 0x0570396a, 0x65d0a001, 0x65d6a443, 0x65d4a826,
    0x6594ac26, 0x6554ac26, 0x6556ac26, 0x6552ac26, 0x65cbac85, 0x65caac01,
    0x6589ac85, 0x6588ac01, 0x65c9ac85, 0x65c8ac01, 0x65dea833, 0x659ca509,
    0x65d8a801, 0x65dcac01, 0x655cb241, 0x0520a1e0, 0x0521a601, 0x052281e0,
    0x05238601, 0x04a14026, 0x042244a6, 0x046344a6, 0x04a444a6, 0x04e544a7,
    0x0568aca7, 0x05b23230, 0x853040af, 0xc5b040af, 0xe57080af, 0xe5b080af,
    0x25034440, 0x254054c4, 0x25034640, 0x25415a05, 0x25834440, 0x25c54489,
    0x250b5d3a, 0x2550dc20, 0x2518e3e1, 0x2518e021, 0x2518e0a1, 0x2518e121,
    0x2518e1a1, 0x2558e3e2, 0x2558e042, 0x2558e0c2, 0x2558e142, 0x2598e3e3,
    0x2598e063, 0x2598e0e3, 0x2598e163, 0x25d8e3e4, 0x25d8e084, 0x25d8e104,
    0x25d8e184, 0x2518e407, 0x05214800, 0x05614800, 0x05a14800, 0x05e14800,
    0x05214c00, 0x05614c00, 0x05a14c00, 0x05e14c00, 0x05304001, 0x05314001,
    0x05a18610, 0x05e18610, 0x05271e11, 0x6545e891, 0x6585e891, 0x65c5e891,
    0x6545c891, 0x6585c891, 0x65c5c891, 0x45b0c210, 0x45f1c231, 0x1e601000,
    0x1e603000, 0x1e621000, 0x1e623000, 0x1e641000, 0x1e643000, 0x1e661000,
    0x1e663000, 0x1e681000, 0x1e683000, 0x1e6a1000, 0x1e6a3000, 0x1e6c1000,
    0x1e6c3000, 0x1e6e1000, 0x1e6e3000, 0x1e701000, 0x1e703000, 0x1e721000,
    0x1e723000, 0x1e741000, 0x1e743000, 0x1e761000, 0x1e763000, 0x1e781000,
    0x1e783000, 0x1e7a1000, 0x1e7a3000, 0x1e7c1000, 0x1e7c3000, 0x1e7e1000,
    0x1e7e3000, 0xf8398025, 0xf83703f0, 0xf825112c, 0xf83c23af, 0xf836327f,
    0xf83f51c5, 0xf83041ff, 0xf83b7214, 0xf82c612b, 0xf8a6823e, 0xf8bb03dc,
    0xf8a7128a, 0xf8aa2304,
    0xf8b132d1, 0xf8a351fd, 0xf8b64273, 0xf8b671e2, 0xf8a6620c, 0xf8eb82ed,
    0xf8e1027e, 0xf8e51051, 0xf8f021b6, 0xf8ea33b5, 0xf8fb536c, 0xf8e343e1,
    0xf8f87233, 0xf8e9637c, 0xf86f82a7, 0xf877033f, 0xf862137f, 0xf87022ea,
    0xf87331e3, 0xf8605359, 0xf87741e2, 0xf86c7384, 0xf87e621d, 0xb83b8126,
    0xb83d00f0, 0xb82411e7, 0xb8292117, 0xb82232bc, 0xb83f5365, 0xb82041f1,
    0xb824711a, 0xb83c6376, 0xb8bb82f9, 0xb8a000c4, 0xb8b01080, 0xb8af2141,
    0xb8a73145, 0xb8bc5287, 0xb8b740d5, 0xb8ab7228, 0xb8bf6226, 0xb8e283cc,
    0xb8fd0363, 0xb8f611dd, 0xb8ed223c, 0xb8f83045, 0xb8ee520a, 0xb8eb42fb,
    0xb8ec72c4, 0xb8f16024, 0xb87381f0, 0xb86d018e, 0xb8621071, 0xb87520b7,
    0xb8663267, 0xb86d523c, 0xb8704046, 0xb87d7083, 0xb8666290, 0xce33218d,
    0xce110278, 0xce668eea, 0xce8d87d3, 0xce678006, 0xce79862e, 0xcec08148,
    0xce768a96, 0x2520c3fb, 0x25a1d4ef, 0x05803e87, 0x05401580, 0x05001e36,
    0x2560d9f6, 0x25e1d0a5, 0x0580e08d, 0x05400d2d, 0x050074d9, 0x2560c6d1,
    0x2521dd5d, 0x058394b0, 0x05403ece, 0x050026a3, 0x25a0cef5, 0x25a1d668,
    0x05800ed8, 0x0540bb31, 0x05008874, 0x25e0c491, 0x2521d873, 0x0580b1d9,
    0x05401548, 0x0500cbde, 0x2560d461, 0x2521c96c, 0x0583ab47, 0x054026bb,
    0x05007dd7, 0x04f402d6, 0x04ad053c, 0x659c0287, 0x65cb09ab, 0x65c80701,
    0x0496b22d, 0x04400064, 0x049a0dc7, 0x04108fa4, 0x04db0aa0, 0x0499a123,
    0x041aab1c, 0x04d906f3, 0x04d3954d, 0x049193cc, 0x049003ae, 0x0497b4f5,
    0x049ea342, 0x04981229, 0x05e78440, 0x05e4856e, 0x044813ae, 0x044a02c3,
    0x04011b63, 0x04dcb4f3, 0x65808cb5, 0x65cd86b9, 0x65868071, 0x65878cf3,
    0x6582922e, 0x04ddba2d, 0x6582a9f1, 0x65c0b77a, 0x65c1a8a7, 0x658da81b,
    0x65819698, 0x65e59723, 0x65a1123d, 0x65e03dae, 0x65f6be82, 0x65a2cd1d,
    0x65e0f6ce, 0x65ec5af9, 0x65ea6035, 0x045756eb, 0x0493727e, 0x042d3284,
    0x04be33d6, 0x046e3231, 0x04f4338c, 0x052d69a1, 0x05ab6d47, 0x658899e4,
    0x4580b3a3, 0x45deb685, 0x042d390d, 0x04da21dd, 0x04582323, 0x04d92838,
    0x04882c2a, 0x048a2799, 0x65872770, 0x65863d61, 0x65d82021, 0x04012c5a,
  };

// END Generated code -- do not edit
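
// Illustrative usage sketch (not part of the generated output above): each
// word of insns[] is the expected encoding of the corresponding instruction
// emitted between `back` and `forth`. A test harness could diff the
// assembled code against the table word by word, roughly as below;
// `check_insns` is a hypothetical helper written for this sketch, not an
// API defined by this file.
#include <cstddef>
#include <cstdio>

static bool check_insns(const unsigned int* emitted,
                        const unsigned int* expected, std::size_t n) {
  bool ok = true;
  for (std::size_t i = 0; i < n; i++) {
    if (emitted[i] != expected[i]) {  // encodings must match word for word
      std::printf("insn %zu: expected 0x%08x, got 0x%08x\n",
                  i, expected[i], emitted[i]);
      ok = false;
    }
  }
  return ok;
}

// Example call, assuming `code_start` points at the first emitted word:
//   check_insns(code_start, insns, sizeof insns / sizeof insns[0]);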