diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 612ed63751b..a1236d032e6 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -2685,12 +2685,55 @@ bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
   return true;
 }
 
+bool can_combine_with_imm(Node* binary_node, Node* replicate_node) {
+  if (UseSVE == 0 || !VectorNode::is_invariant_vector(replicate_node)){
+    return false;
+  }
+  Node* imm_node = replicate_node->in(1);
+  if (!imm_node->is_Con()) {
+    return false;
+  }
+
+  const Type* t = imm_node->bottom_type();
+  if (!(t->isa_int() || t->isa_long())) {
+    return false;
+  }
+
+  switch (binary_node->Opcode()) {
+  case Op_AndV:
+  case Op_OrV:
+  case Op_XorV: {
+    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(binary_node));
+    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
+    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
+  }
+  case Op_AddVB:
+    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
+  case Op_AddVS:
+  case Op_AddVI:
+    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
+  case Op_AddVL:
+    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
+  default:
+    return false;
+  }
+}
+
+bool is_vector_arith_imm_pattern(Node* n, Node* m) {
+  if (n != NULL && m != NULL) {
+    return can_combine_with_imm(n, m);
+  }
+  return false;
+}
+
 // Should the matcher clone input 'm' of node 'n'?
 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
   // ShiftV src (ShiftCntV con)
   // StoreVector (VectorStoreMask src)
+  // Binary src (Replicate con)
   if (is_vshift_con_pattern(n, m) ||
-      (UseSVE > 0 && m->Opcode() == Op_VectorStoreMask && n->Opcode() == Op_StoreVector)) {
+      (UseSVE > 0 && m->Opcode() == Op_VectorStoreMask && n->Opcode() == Op_StoreVector) ||
+      is_vector_arith_imm_pattern(n, m)) {
     mstack.push(m, Visit);
     return true;
   }
@@ -4611,6 +4654,17 @@ operand immL8_shift8()
   interface(CONST_INTER);
 %}
 
+// 8 bit integer valid for vector add sub immediate
+operand immBAddSubV()
+%{
+  predicate(n->get_int() <= 255 && n->get_int() >= -255);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // 32 bit integer valid for add sub immediate
 operand immIAddSub()
 %{
@@ -4621,8 +4675,39 @@ operand immIAddSub()
   interface(CONST_INTER);
 %}
 
+// 32 bit integer valid for vector add sub immediate
+operand immIAddSubV()
+%{
+  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // 32 bit unsigned integer valid for logical immediate
-// TODO -- check this is right when e.g the mask is 0x80000000
+
+operand immBLog()
+%{
+  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immSLog()
+%{
+  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 operand immILog()
 %{
   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
@@ -4700,6 +4785,17 @@ operand immLAddSub()
   interface(CONST_INTER);
 %}
 
+// 64 bit integer valid for vector add sub immediate
+operand immLAddSubV()
+%{
+  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // 64 bit integer valid for logical immediate
 operand immLLog()
 %{
diff --git a/src/hotspot/cpu/aarch64/aarch64_sve.ad b/src/hotspot/cpu/aarch64/aarch64_sve.ad
index 78c09b65708..8260459f223 100644
--- a/src/hotspot/cpu/aarch64/aarch64_sve.ad
+++ b/src/hotspot/cpu/aarch64/aarch64_sve.ad
@@ -935,6 +935,217 @@ instruct vaddD_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
   ins_pipe(pipe_slow);
 %}
 
+// vector add reg imm (unpredicated)
+
+instruct vaddImmB(vReg dst_src, immBAddSubV con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AddVB dst_src (ReplicateB con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_add $dst_src, $dst_src, $con\t # vector (sve) (B)" %}
+  ins_encode %{
+    int32_t val = $con$$constant;
+    if (val > 0){
+      __ sve_add(as_FloatRegister($dst_src$$reg), __ B, val);
+    } else if (val < 0){
+      __ sve_sub(as_FloatRegister($dst_src$$reg), __ B, -val);
+    }
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vaddImmS(vReg dst_src, immIAddSubV con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AddVS dst_src (ReplicateS con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_add $dst_src, $dst_src, $con\t # vector (sve) (H)" %}
+  ins_encode %{
+    int32_t val = $con$$constant;
+    if (val > 0){
+      __ sve_add(as_FloatRegister($dst_src$$reg), __ H, val);
+    } else if (val < 0){
+      __ sve_sub(as_FloatRegister($dst_src$$reg), __ H, -val);
+    }
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vaddImmI(vReg dst_src, immIAddSubV con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AddVI dst_src (ReplicateI con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_add $dst_src, $dst_src, $con\t # vector (sve) (S)" %}
+  ins_encode %{
+    int32_t val = $con$$constant;
+    if (val > 0){
+      __ sve_add(as_FloatRegister($dst_src$$reg), __ S, val);
+    } else if (val < 0){
+      __ sve_sub(as_FloatRegister($dst_src$$reg), __ S, -val);
+    }
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vaddImmL(vReg dst_src, immLAddSubV con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AddVL dst_src (ReplicateL con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_add $dst_src, $dst_src, $con\t # vector (sve) (D)" %}
+  ins_encode %{
+    int32_t val = $con$$constant;
+    if (val > 0){
+      __ sve_add(as_FloatRegister($dst_src$$reg), __ D, val);
+    } else if (val < 0){
+      __ sve_sub(as_FloatRegister($dst_src$$reg), __ D, -val);
+    }
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// vector binary op reg imm (unpredicated)
+
+instruct vandB(vReg dst_src, immBLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AndV dst_src (ReplicateB con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_and $dst_src, $dst_src, $con\t # vector (sve) (B)" %}
+  ins_encode %{
+    __ sve_and(as_FloatRegister($dst_src$$reg), __ B,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vandH(vReg dst_src, immSLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AndV dst_src (ReplicateS con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_and $dst_src, $dst_src, $con\t # vector (sve) (H)" %}
+  ins_encode %{
+    __ sve_and(as_FloatRegister($dst_src$$reg), __ H,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vandS(vReg dst_src, immILog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AndV dst_src (ReplicateI con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_and $dst_src, $dst_src, $con\t # vector (sve) (S)" %}
+  ins_encode %{
+    __ sve_and(as_FloatRegister($dst_src$$reg), __ S,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vandD(vReg dst_src, immLLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AndV dst_src (ReplicateL con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_and $dst_src, $dst_src, $con\t # vector (sve) (D)" %}
+  ins_encode %{
+    __ sve_and(as_FloatRegister($dst_src$$reg), __ D,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vorB(vReg dst_src, immBLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (OrV dst_src (ReplicateB con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_orr $dst_src, $dst_src, $con\t # vector (sve) (B)" %}
+  ins_encode %{
+    __ sve_orr(as_FloatRegister($dst_src$$reg), __ B,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vorH(vReg dst_src, immSLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (OrV dst_src (ReplicateS con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_orr $dst_src, $dst_src, $con\t # vector (sve) (H)" %}
+  ins_encode %{
+    __ sve_orr(as_FloatRegister($dst_src$$reg), __ H,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vorS(vReg dst_src, immILog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (OrV dst_src (ReplicateI con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_orr $dst_src, $dst_src, $con\t # vector (sve) (S)" %}
+  ins_encode %{
+    __ sve_orr(as_FloatRegister($dst_src$$reg), __ S,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vorD(vReg dst_src, immLLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (OrV dst_src (ReplicateL con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_orr $dst_src, $dst_src, $con\t # vector (sve) (D)" %}
+  ins_encode %{
+    __ sve_orr(as_FloatRegister($dst_src$$reg), __ D,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vxorB(vReg dst_src, immBLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (XorV dst_src (ReplicateB con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_eor $dst_src, $dst_src, $con\t # vector (sve) (B)" %}
+  ins_encode %{
+    __ sve_eor(as_FloatRegister($dst_src$$reg), __ B,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vxorH(vReg dst_src, immSLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (XorV dst_src (ReplicateS con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_eor $dst_src, $dst_src, $con\t # vector (sve) (H)" %}
+  ins_encode %{
+    __ sve_eor(as_FloatRegister($dst_src$$reg), __ H,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vxorS(vReg dst_src, immILog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (XorV dst_src (ReplicateI con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_eor $dst_src, $dst_src, $con\t # vector (sve) (S)" %}
+  ins_encode %{
+    __ sve_eor(as_FloatRegister($dst_src$$reg), __ S,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vxorD(vReg dst_src, immLLog con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (XorV dst_src (ReplicateL con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_eor $dst_src, $dst_src, $con\t # vector (sve) (D)" %}
+  ins_encode %{
+    __ sve_eor(as_FloatRegister($dst_src$$reg), __ D,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}
 // vector and
 
 instruct vand(vReg dst, vReg src1, vReg src2) %{
diff --git a/src/hotspot/cpu/aarch64/aarch64_sve_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_sve_ad.m4
index 91a7a8b21d5..7589735365e 100644
--- a/src/hotspot/cpu/aarch64/aarch64_sve_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_sve_ad.m4
@@ -603,7 +603,61 @@ BINARY_OP_PREDICATE(vaddI, AddVI, S, sve_add)
 BINARY_OP_PREDICATE(vaddL, AddVL, D, sve_add)
 BINARY_OP_PREDICATE(vaddF, AddVF, S, sve_fadd)
 BINARY_OP_PREDICATE(vaddD, AddVD, D, sve_fadd)
+dnl
+dnl ADD_IMM($1,          $2,   $3      )
+dnl ADD_IMM(name_suffix, size, imm_type)
+define(`ADD_IMM', `
+instruct vaddImm$1(vReg dst_src, $3 con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src (AddV$1 dst_src (Replicate$1 con)));
+  ins_cost(SVE_COST);
+  format %{ "sve_add $dst_src, $dst_src, $con\t # vector (sve) ($2)" %}
+  ins_encode %{
+    int32_t val = $con$$constant;
+    if (val > 0){
+      __ sve_add(as_FloatRegister($dst_src$$reg), __ $2, val);
+    } else if (val < 0){
+      __ sve_sub(as_FloatRegister($dst_src$$reg), __ $2, -val);
+    }
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
 
+// vector add reg imm (unpredicated)
+ADD_IMM(B, B, immBAddSubV)
+ADD_IMM(S, H, immIAddSubV)
+ADD_IMM(I, S, immIAddSubV)
+ADD_IMM(L, D, immLAddSubV)
+dnl
+dnl BITWISE_OP_IMM($1,        $2,       $3,   $4,   $5      )
+dnl BITWISE_OP_IMM(insn_name, op_name1, size, type, op_name2)
+define(`BITWISE_OP_IMM', `
+instruct $1(vReg dst_src, imm$4Log con) %{
+  predicate(UseSVE > 0);
+  match(Set dst_src ($2 dst_src (Replicate$4 con)));
+  ins_cost(SVE_COST);
+  format %{ "$5 $dst_src, $dst_src, $con\t # vector (sve) ($3)" %}
+  ins_encode %{
+    __ $5(as_FloatRegister($dst_src$$reg), __ $3,
+         (uint64_t)($con$$constant));
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
+
+// vector binary op reg imm (unpredicated)
+BITWISE_OP_IMM(vandB, AndV, B, B, sve_and)
+BITWISE_OP_IMM(vandH, AndV, H, S, sve_and)
+BITWISE_OP_IMM(vandS, AndV, S, I, sve_and)
+BITWISE_OP_IMM(vandD, AndV, D, L, sve_and)
+BITWISE_OP_IMM(vorB,  OrV,  B, B, sve_orr)
+BITWISE_OP_IMM(vorH,  OrV,  H, S, sve_orr)
+BITWISE_OP_IMM(vorS,  OrV,  S, I, sve_orr)
+BITWISE_OP_IMM(vorD,  OrV,  D, L, sve_orr)
+BITWISE_OP_IMM(vxorB, XorV, B, B, sve_eor)
+BITWISE_OP_IMM(vxorH, XorV, H, S, sve_eor)
+BITWISE_OP_IMM(vxorS, XorV, S, I, sve_eor)
+BITWISE_OP_IMM(vxorD, XorV, D, L, sve_eor)
+dnl
 dnl
 dnl BINARY_OP_UNSIZED($1,        $2,      $3  )
 dnl BINARY_OP_UNSIZED(insn_name, op_name, insn)
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
index 4e883838a66..943ca002c7a 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
@@ -82,6 +82,11 @@ Assembler::SIMD_RegVariant Assembler::elemType_to_regVariant(BasicType bt) {
   return elemBytes_to_regVariant(type2aelembytes(bt));
 }
 
+unsigned Assembler::regVariant_to_elemBits(Assembler::SIMD_RegVariant T){
+  guarantee(T != Q, "Invalid register variant");
+  return 1 << (T + 3);
+}
+
 void Assembler::emit_data64(jlong data,
                             relocInfo::relocType rtype,
                             int format) {
@@ -339,21 +344,21 @@ void Assembler::wrap_label(Label &L, prfop op, prefetch_insn insn) {
 }
 
 bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) {
-  bool shift = false;
-  uint64_t uimm = (uint64_t)uabs((jlong)imm);
-  if (uimm < (1 << 12))
-    return true;
-  if (uimm < (1 << 24)
-      && ((uimm >> 12) << 12 == uimm)) {
-    return true;
-  }
-  return false;
+  return operand_valid_for_immediate_bits(imm, 12);
+}
+
+bool Assembler::operand_valid_for_sve_add_sub_immediate(int64_t imm) {
+  return operand_valid_for_immediate_bits(imm, 8);
 }
 
 bool Assembler::operand_valid_for_logical_immediate(bool is32, uint64_t imm) {
   return encode_logical_immediate(is32, imm) != 0xffffffff;
 }
 
+bool Assembler::operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm) {
+  return encode_sve_logical_immediate(elembits, imm) != 0xffffffff;
+}
+
 static uint64_t doubleTo64Bits(jdouble d) {
   union {
     jdouble double_value;
@@ -383,6 +388,17 @@ int AbstractAssembler::code_fill_byte() {
 // n.b. this is implemented in subclass MacroAssembler
 void Assembler::bang_stack_with_offset(int offset) { Unimplemented(); }
 
+bool asm_util::operand_valid_for_immediate_bits(int64_t imm, unsigned nbits) {
+  guarantee(nbits == 8 || nbits == 12, "invalid nbits value");
+  uint64_t uimm = (uint64_t)uabs((jlong)imm);
+  if (uimm < (UCONST64(1) << nbits))
+    return true;
+  if (uimm < (UCONST64(1) << (2 * nbits))
+      && ((uimm >> nbits) << nbits == uimm)) {
+    return true;
+  }
+  return false;
+}
 
 // and now the routines called by the assembler which encapsulate the
 // above encode and decode functions
@@ -403,6 +419,25 @@ asm_util::encode_logical_immediate(bool is32, uint64_t imm)
   return encoding_for_logical_immediate(imm);
 }
 
+uint32_t
+asm_util::encode_sve_logical_immediate(unsigned elembits, uint64_t imm) {
+  guarantee(elembits == 8 || elembits == 16 ||
+            elembits == 32 || elembits == 64, "unsupported element size");
+  uint64_t upper = UCONST64(-1) << (elembits/2) << (elembits/2);
+  // Allow all zeros or all ones in top bits, so that
+  // constant expressions like ~1 are permitted.
+  if ((imm & ~upper) != imm && (imm | upper) != imm)
+    return 0xffffffff;
+
+  // Replicate the immediate in different element sizes to 64 bits.
+  imm &= ~upper;
+  for (unsigned i = elembits; i < 64; i *= 2) {
+    imm |= (imm << i);
+  }
+
+  return encoding_for_logical_immediate(imm);
+}
+
 unsigned Assembler::pack(double value) {
   float val = (float)value;
   unsigned result = encoding_for_fp_immediate(val);
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index 9858eccb8a7..9eee231ec0f 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -159,6 +159,8 @@ REGISTER_DECLARATION(PRegister, ptrue, p7);
 
 namespace asm_util {
   uint32_t encode_logical_immediate(bool is32, uint64_t imm);
+  uint32_t encode_sve_logical_immediate(unsigned elembits, uint64_t imm);
+  bool operand_valid_for_immediate_bits(int64_t imm, unsigned nbits);
 };
 
 using namespace asm_util;
@@ -1516,6 +1518,8 @@ public:
   static SIMD_Arrangement esize2arrangement(unsigned esize, bool isQ);
   static SIMD_RegVariant elemType_to_regVariant(BasicType bt);
   static SIMD_RegVariant elemBytes_to_regVariant(unsigned esize);
+  // Return the number of bits in an element of the given SIMD_RegVariant.
+  static unsigned regVariant_to_elemBits(SIMD_RegVariant T);
 
   enum shift_kind { LSL, LSR, ASR, ROR };
 
@@ -2953,6 +2957,32 @@ public:
   INSN(sve_sub, 0b001);
 #undef INSN
 
+// SVE integer add/subtract immediate (unpredicated)
+#define INSN(NAME, op)                                                  \
+  void NAME(FloatRegister Zd, SIMD_RegVariant T, unsigned imm8) {       \
+    starti;                                                             \
+    /* The immediate is an unsigned value in the range 0 to 255, and    \
+     * for element width of 16 bits or higher it may also be a          \
+     * positive multiple of 256 in the range 256 to 65280.              \
+     */                                                                 \
+    assert(T != Q, "invalid size");                                     \
+    int sh = 0;                                                         \
+    if (imm8 <= 0xff) {                                                 \
+      sh = 0;                                                           \
+    } else if (T != B && imm8 <= 0xff00 && (imm8 & 0xff) == 0) {        \
+      sh = 1;                                                           \
+      imm8 = (imm8 >> 8);                                               \
+    } else {                                                            \
+      guarantee(false, "invalid immediate");                            \
+    }                                                                   \
+    f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000, 21, 17);            \
+    f(op, 16, 14), f(sh, 13), f(imm8, 12, 5), rf(Zd, 0);                \
+  }
+
+  INSN(sve_add, 0b011);
+  INSN(sve_sub, 0b111);
+#undef INSN
+
 // SVE floating-point arithmetic - unpredicated
 #define INSN(NAME, opcode)                                                             \
   void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \
@@ -3090,6 +3120,20 @@ public:
   INSN(sve_bic, 0b11);
 #undef INSN
 
+// SVE bitwise logical with immediate (unpredicated)
+#define INSN(NAME, opc)                                                      \
+  void NAME(FloatRegister Zd, SIMD_RegVariant T, uint64_t imm) {             \
+    starti;                                                                  \
+    unsigned elembits = regVariant_to_elemBits(T);                           \
+    uint32_t val = encode_sve_logical_immediate(elembits, imm);              \
+    f(0b00000101, 31, 24), f(opc, 23, 22), f(0b0000, 21, 18);                \
+    f(val, 17, 5), rf(Zd, 0);                                                \
+  }
+  INSN(sve_and, 0b10);
+  INSN(sve_eor, 0b01);
+  INSN(sve_orr, 0b00);
+#undef INSN
+
 // SVE shift immediate - unpredicated
 #define INSN(NAME, opc, isSHR)                                                  \
   void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, int shift) { \
@@ -3639,7 +3683,9 @@ void sve_cmp(Condition cond, PRegister Pd, SIMD_RegVariant T,
   virtual void bang_stack_with_offset(int offset);
 
   static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
+  static bool operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm);
   static bool operand_valid_for_add_sub_immediate(int64_t imm);
+  static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
   static bool operand_valid_for_float_immediate(double imm);
 
   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
diff --git a/test/hotspot/gtest/aarch64/aarch64-asmtest.py b/test/hotspot/gtest/aarch64/aarch64-asmtest.py
index a7df17b2129..e79afecd3dc 100644
--- a/test/hotspot/gtest/aarch64/aarch64-asmtest.py
+++ b/test/hotspot/gtest/aarch64/aarch64-asmtest.py
@@ -7,6 +7,55 @@ AARCH64_AS = "as"
 AARCH64_OBJDUMP = "objdump"
 AARCH64_OBJCOPY = "objcopy"
 
+# These tables are legal immediate logical operands
+immediates8 \
+     = [0x1, 0x0c, 0x3e, 0x60, 0x7c, 0x80, 0x83,
+        0xe1, 0xbf, 0xef, 0xf3, 0xfe]
+
+immediates16 \
+     = [0x1, 0x38, 0x7e, 0xff, 0x1fc, 0x1ff, 0x3f0,
+        0x7e0, 0xfc0, 0x1f80, 0x3ff0, 0x7e00, 0x7e00,
+        0x8000, 0x81ff, 0xc1ff, 0xc003, 0xc7ff, 0xdfff,
+        0xe03f, 0xe10f, 0xe1ff, 0xf801, 0xfc00, 0xfc07,
+        0xff03, 0xfffe]
+
+immediates32 \
+     = [0x1, 0x3f, 0x1f0, 0x7e0,
+        0x1c00, 0x3ff0, 0x8000, 0x1e000,
+        0x3e000, 0x78000, 0xe0000, 0x100000,
+        0x1fffe0, 0x3fe000, 0x780000, 0x7ffff8,
+        0xff8000, 0x1800180, 0x1fffc00, 0x3c003c0,
+        0x3ffff00, 0x7c00000, 0x7fffe00, 0xf000f00,
+        0xfffe000, 0x18181818, 0x1ffc0000, 0x1ffffffe,
+        0x3f003f00, 0x3fffe000, 0x60006000, 0x7f807f80,
+        0x7ffffc00, 0x800001ff, 0x803fffff, 0x9f9f9f9f,
+        0xc0000fff, 0xc0c0c0c0, 0xe0000000, 0xe003e003,
+        0xe3ffffff, 0xf0000fff, 0xf0f0f0f0, 0xf80000ff,
+        0xf83ff83f, 0xfc00007f, 0xfc1fffff, 0xfe0001ff,
+        0xfe3fffff, 0xff003fff, 0xff800003, 0xff87ff87,
+        0xffc00fff, 0xffe0000f, 0xffefffef, 0xfff1fff1,
+        0xfff83fff, 0xfffc0fff, 0xfffe0fff, 0xffff3fff,
+        0xffffc007, 0xffffe1ff, 0xfffff80f, 0xfffffe07,
+        0xffffffbf, 0xfffffffd]
+
+immediates64 \
+     = [0x1, 0x1f80, 0x3fff0, 0x3ffffc,
+        0x3fe0000, 0x1ffc0000, 0xf8000000, 0x3ffffc000,
+        0xffffffe00, 0x3ffffff800, 0xffffc00000, 0x3f000000000,
+        0x7fffffff800, 0x1fe000001fe0, 0x3ffffff80000, 0xc00000000000,
+        0x1ffc000000000, 0x3ffff0003ffff, 0x7ffffffe00000, 0xfffffffffc000,
+        0x1ffffffffffc00, 0x3fffffffffff00, 0x7ffffffffffc00, 0xffffffffff8000,
+        0x1ffffffff800000, 0x3fffffc03fffffc, 0x7fffc0000000000, 0xff80ff80ff80ff8,
+        0x1c00000000000000, 0x1fffffffffff0000, 0x3fffff803fffff80, 0x7fc000007fc00000,
+        0x8000000000000000, 0x803fffff803fffff, 0xc000007fc000007f, 0xe00000000000ffff,
+        0xe3ffffffffffffff, 0xf007f007f007f007, 0xf80003ffffffffff, 0xfc000003fc000003,
+        0xfe000000007fffff, 0xff00000000007fff, 0xff800000000003ff, 0xffc00000000000ff,
+        0xffe00000000003ff, 0xfff0000000003fff, 0xfff80000001fffff, 0xfffc0000fffc0000,
+        0xfffe003fffffffff, 0xffff3fffffffffff, 0xffffc0000007ffff, 0xffffe01fffffe01f,
+        0xfffff800000007ff, 0xfffffc0fffffffff, 0xffffff00003fffff, 0xffffffc0000007ff,
+        0xfffffff0000001ff, 0xfffffffc00003fff, 0xffffffff07ffffff, 0xffffffffe003ffff,
+        0xfffffffffc01ffff, 0xffffffffffc00003, 0xfffffffffffc000f, 0xffffffffffffe07f]
+
 class Operand(object):
 
      def generate(self):
@@ -351,51 +400,12 @@ class AddSubImmOp(TwoRegImmedInstruction):
          return super(AddSubImmOp, self).cstr() + ");"
 
 class LogicalImmOp(AddSubImmOp):
-
-     # These tables are legal immediate logical operands
-     immediates32 \
-         = [0x1, 0x3f, 0x1f0, 0x7e0,
-            0x1c00, 0x3ff0, 0x8000, 0x1e000,
-            0x3e000, 0x78000, 0xe0000, 0x100000,
-            0x1fffe0, 0x3fe000, 0x780000, 0x7ffff8,
-            0xff8000, 0x1800180, 0x1fffc00, 0x3c003c0,
-            0x3ffff00, 0x7c00000, 0x7fffe00, 0xf000f00,
-            0xfffe000, 0x18181818, 0x1ffc0000, 0x1ffffffe,
-            0x3f003f00, 0x3fffe000, 0x60006000, 0x7f807f80,
-            0x7ffffc00, 0x800001ff, 0x803fffff, 0x9f9f9f9f,
-            0xc0000fff, 0xc0c0c0c0, 0xe0000000, 0xe003e003,
-            0xe3ffffff, 0xf0000fff, 0xf0f0f0f0, 0xf80000ff,
-            0xf83ff83f, 0xfc00007f, 0xfc1fffff, 0xfe0001ff,
-            0xfe3fffff, 0xff003fff, 0xff800003, 0xff87ff87,
-            0xffc00fff, 0xffe0000f, 0xffefffef, 0xfff1fff1,
-            0xfff83fff, 0xfffc0fff, 0xfffe0fff, 0xffff3fff,
-            0xffffc007, 0xffffe1ff, 0xfffff80f, 0xfffffe07,
-            0xffffffbf, 0xfffffffd]
-
-     immediates \
-         = [0x1, 0x1f80, 0x3fff0, 0x3ffffc,
-            0x3fe0000, 0x1ffc0000, 0xf8000000, 0x3ffffc000,
-            0xffffffe00, 0x3ffffff800, 0xffffc00000, 0x3f000000000,
-            0x7fffffff800, 0x1fe000001fe0, 0x3ffffff80000, 0xc00000000000,
-            0x1ffc000000000, 0x3ffff0003ffff, 0x7ffffffe00000, 0xfffffffffc000,
-            0x1ffffffffffc00, 0x3fffffffffff00, 0x7ffffffffffc00, 0xffffffffff8000,
-            0x1ffffffff800000, 0x3fffffc03fffffc, 0x7fffc0000000000, 0xff80ff80ff80ff8,
-            0x1c00000000000000, 0x1fffffffffff0000, 0x3fffff803fffff80, 0x7fc000007fc00000,
-            0x8000000000000000, 0x803fffff803fffff, 0xc000007fc000007f, 0xe00000000000ffff,
-            0xe3ffffffffffffff, 0xf007f007f007f007, 0xf80003ffffffffff, 0xfc000003fc000003,
-            0xfe000000007fffff, 0xff00000000007fff, 0xff800000000003ff, 0xffc00000000000ff,
-            0xffe00000000003ff, 0xfff0000000003fff, 0xfff80000001fffff, 0xfffc0000fffc0000,
-            0xfffe003fffffffff, 0xffff3fffffffffff, 0xffffc0000007ffff, 0xffffe01fffffe01f,
-            0xfffff800000007ff, 0xfffffc0fffffffff, 0xffffff00003fffff, 0xffffffc0000007ff,
-            0xfffffff0000001ff, 0xfffffffc00003fff, 0xffffffff07ffffff, 0xffffffffe003ffff,
-            0xfffffffffc01ffff, 0xffffffffffc00003, 0xfffffffffffc000f, 0xffffffffffffe07f]
-
      def generate(self):
           AddSubImmOp.generate(self)
           self.immed = \
-              self.immediates32[random.randint(0, len(self.immediates32)-1)] \
+              immediates32[random.randint(0, len(immediates32)-1)] \
               if self.isWord else \
-              self.immediates[random.randint(0, len(self.immediates)-1)]
+              immediates64[random.randint(0, len(immediates64)-1)]
 
           return self
 
@@ -406,6 +416,44 @@ class LogicalImmOp(AddSubImmOp):
      def cstr(self):
           return super(AddSubImmOp, self).cstr() + "ll);"
 
+class SVEBinaryImmOp(Instruction):
+    def __init__(self, name):
+        reg = SVEVectorRegister().generate()
+        self.reg = [reg, reg]
+        self.numRegs = len(self.reg)
+        self._width = RegVariant(0, 3)
+        self._isLogical = False
+        if name in ["and", "eor", "orr"]:
+            self._isLogical = True
+        Instruction.__init__(self, name)
+
+    def generate(self):
+        Instruction.generate(self)
+        self.immed = random.randint(0, (1<<8)-1)
+        if self._isLogical:
+            vectype = self._width.cstr()
+            if vectype == "__ B":
+                self.immed = immediates8[random.randint(0, len(immediates8)-1)]
+            elif vectype == "__ H":
+                self.immed = immediates16[random.randint(0, len(immediates16)-1)]
+            elif vectype == "__ S":
+                self.immed = immediates32[random.randint(0, len(immediates32)-1)]
+            elif vectype == "__ D":
+                self.immed = immediates64[random.randint(0, len(immediates64)-1)]
+        return self
+
+    def cstr(self):
+        formatStr = "%s%s, %s, %su);"
+        return (formatStr
+                % tuple(["__ sve_" + self._name + "("] +
+                        [str(self.reg[0]), self._width.cstr(), self.immed]))
+
+    def astr(self):
+        formatStr = "%s%s, %s, #0x%x"
+        Regs = [str(self.reg[i]) + self._width.astr() for i in range(0, self.numRegs)]
+        return (formatStr
+                % tuple([Instruction.astr(self)] + Regs + [self.immed]))
+
 class MultiOp():
 
     def multipleForms(self):
@@ -1719,6 +1767,9 @@ generate(SHA3SIMDOp, ["bcax", "eor3", "rax1", "xar"])
 
 generate(SHA512SIMDOp, ["sha512h", "sha512h2", "sha512su0", "sha512su1"])
 
+for i in range(6):
+    generate(SVEBinaryImmOp, ["add", "sub", "and", "eor", "orr"])
+
 generate(SVEVectorOp, [["add", "ZZZ"],
                        ["sub", "ZZZ"],
                        ["fadd", "ZZZ"],
diff --git a/test/hotspot/gtest/aarch64/asmtest.out.h b/test/hotspot/gtest/aarch64/asmtest.out.h
index 85aca7268f4..8bbbdc579f5 100644
--- a/test/hotspot/gtest/aarch64/asmtest.out.h
+++ b/test/hotspot/gtest/aarch64/asmtest.out.h
@@ -1013,63 +1013,105 @@
     __ sha512su0(v26, __ T2D, v26);                    //       sha512su0               v26.2D, v26.2D
     __ sha512su1(v24, __ T2D, v22, v0);                //       sha512su1               v24.2D, v22.2D, v0.2D
 
+// SVEBinaryImmOp
+    __ sve_add(z4, __ B, 147u);                        //       add     z4.b, z4.b, #0x93
+    __ sve_sub(z0, __ B, 124u);                        //       sub     z0.b, z0.b, #0x7c
+    __ sve_and(z1, __ H, 508u);                        //       and     z1.h, z1.h, #0x1fc
+    __ sve_eor(z9, __ D, 18374686479671656447u);       //       eor     z9.d, z9.d, #0xff00000000007fff
+    __ sve_orr(z22, __ S, 251662080u);                 //       orr     z22.s, z22.s, #0xf000f00
+
+// SVEBinaryImmOp
+    __ sve_add(z8, __ S, 248u);                        //       add     z8.s, z8.s, #0xf8
+    __ sve_sub(z6, __ S, 16u);                         //       sub     z6.s, z6.s, #0x10
+    __ sve_and(z11, __ D, 4160749568u);                //       and     z11.d, z11.d, #0xf8000000
+    __ sve_eor(z26, __ S, 1610637312u);                //       eor     z26.s, z26.s, #0x60006000
+    __ sve_orr(z13, __ D, 18446181398634037247u);      //       orr     z13.d, z13.d, #0xfffe003fffffffff
+
+// SVEBinaryImmOp
+    __ sve_add(z5, __ B, 112u);                        //       add     z5.b, z5.b, #0x70
+    __ sve_sub(z10, __ S, 88u);                        //       sub     z10.s, z10.s, #0x58
+    __ sve_and(z26, __ S, 253952u);                    //       and     z26.s, z26.s, #0x3e000
+    __ sve_eor(z22, __ S, 496u);                       //       eor     z22.s, z22.s, #0x1f0
+    __ sve_orr(z19, __ S, 536870910u);                 //       orr     z19.s, z19.s, #0x1ffffffe
+
+// SVEBinaryImmOp
+    __ sve_add(z14, __ H, 22u);                        //       add     z14.h, z14.h, #0x16
+    __ sve_sub(z16, __ B, 172u);                       //       sub     z16.b, z16.b, #0xac
+    __ sve_and(z23, __ B, 62u);                        //       and     z23.b, z23.b, #0x3e
+    __ sve_eor(z17, __ H, 33279u);                     //       eor     z17.h, z17.h, #0x81ff
+    __ sve_orr(z16, __ B, 254u);                       //       orr     z16.b, z16.b, #0xfe
+
+// SVEBinaryImmOp
+    __ sve_add(z3, __ B, 49u);                         //       add     z3.b, z3.b, #0x31
+    __ sve_sub(z17, __ S, 110u);                       //       sub     z17.s, z17.s, #0x6e
+    __ sve_and(z12, __ S, 4290777087u);                //       and     z12.s, z12.s, #0xffc00fff
+    __ sve_eor(z19, __ S, 134217216u);                 //       eor     z19.s, z19.s, #0x7fffe00
+    __ sve_orr(z23, __ B, 254u);                       //       orr     z23.b, z23.b, #0xfe
+
+// SVEBinaryImmOp
+    __ sve_add(z13, __ S, 54u);                        //       add     z13.s, z13.s, #0x36
+    __ sve_sub(z0, __ B, 120u);                        //       sub     z0.b, z0.b, #0x78
+    __ sve_and(z17, __ D, 18014398509481728u);         //       and     z17.d, z17.d, #0x3fffffffffff00
+    __ sve_eor(z22, __ S, 4294709247u);                //       eor     z22.s, z22.s, #0xfffc0fff
+    __ sve_orr(z2, __ B, 225u);                        //       orr     z2.b, z2.b, #0xe1
+
 // SVEVectorOp
-    __ sve_add(z4, __ B, z6, z17);                     //       add     z4.b, z6.b, z17.b
-    __ sve_sub(z3, __ H, z15, z1);                     //       sub     z3.h, z15.h, z1.h
-    __ sve_fadd(z6, __ D, z5, z9);                     //       fadd    z6.d, z5.d, z9.d
-    __ sve_fmul(z7, __ D, z20, z22);                   //       fmul    z7.d, z20.d, z22.d
-    __ sve_fsub(z5, __ D, z10, z8);                    //       fsub    z5.d, z10.d, z8.d
-    __ sve_abs(z30, __ B, p1, z17);                    //       abs     z30.b, p1/m, z17.b
-    __ sve_add(z11, __ B, p7, z28);                    //       add     z11.b, p7/m, z11.b, z28.b
-    __ sve_and(z26, __ H, p5, z28);                    //       and     z26.h, p5/m, z26.h, z28.h
-    __ sve_asr(z13, __ D, p7, z16);                    //       asr     z13.d, p7/m, z13.d, z16.d
-    __ sve_cnt(z5, __ H, p0, z13);                     //       cnt     z5.h, p0/m, z13.h
-    __ sve_eor(z15, __ S, p2, z26);                    //       eor     z15.s, p2/m, z15.s, z26.s
-    __ sve_lsl(z11, __ S, p1, z22);                    //       lsl     z11.s, p1/m, z11.s, z22.s
-    __ sve_lsr(z4, __ S, p0, z19);                     //       lsr     z4.s, p0/m, z4.s, z19.s
-    __ sve_mul(z17, __ H, p3, z14);                    //       mul     z17.h, p3/m, z17.h, z14.h
-    __ sve_neg(z2, __ S, p4, z3);                      //       neg     z2.s, p4/m, z3.s
-    __ sve_not(z23, __ B, p1, z6);                     //       not     z23.b, p1/m, z6.b
-    __ sve_orr(z17, __ S, p3, z27);                    //       orr     z17.s, p3/m, z17.s, z27.s
-    __ sve_smax(z16, __ D, p1, z2);                    //       smax    z16.d, p1/m, z16.d, z2.d
-    __ sve_smin(z3, __ S, p1, z6);                     //       smin    z3.s, p1/m, z3.s, z6.s
-    __ sve_sub(z19, __ S, p3, z12);                    //       sub     z19.s, p3/m, z19.s, z12.s
-    __ sve_fabs(z8, __ D, p6, z19);                    //       fabs    z8.d, p6/m, z19.d
-    __ sve_fadd(z0, __ S, p2, z23);                    //       fadd    z0.s, p2/m, z0.s, z23.s
-    __ sve_fdiv(z19, __ D, p7, z13);                   //       fdiv    z19.d, p7/m, z19.d, z13.d
-    __ sve_fmax(z6, __ S, p0, z7);                     //       fmax    z6.s, p0/m, z6.s, z7.s
-    __ sve_fmin(z17, __ S, p6, z8);                    //       fmin    z17.s, p6/m, z17.s, z8.s
-    __ sve_fmul(z22, __ D, p5, z22);                   //       fmul    z22.d, p5/m, z22.d, z22.d
-    __ sve_fneg(z2, __ D, p0, z15);                    //       fneg    z2.d, p0/m, z15.d
-    __ sve_frintm(z20, __ D, p1, z4);                  //       frintm  z20.d, p1/m, z4.d
-    __ sve_frintn(z7, __ D, p0, z8);                   //       frintn  z7.d, p0/m, z8.d
-    __ sve_frintp(z19, __ D, p5, z4);                  //       frintp  z19.d, p5/m, z4.d
-    __ sve_fsqrt(z9, __ D, p5, z11);                   //       fsqrt   z9.d, p5/m, z11.d
-    __ sve_fsub(z5, __ S, p7, z16);                    //       fsub    z5.s, p7/m, z5.s, z16.s
-    __ sve_fmad(z22, __ S, p3, z1, z13);               //       fmad    z22.s, p3/m, z1.s, z13.s
-    __ sve_fmla(z20, __ S, p4, z25, z15);              //       fmla    z20.s, p4/m, z25.s, z15.s
-    __ sve_fmls(z4, __ D, p4, z8, z6);                 //       fmls    z4.d, p4/m, z8.d, z6.d
-    __ sve_fnmla(z4, __ D, p7, z16, z29);              //       fnmla   z4.d, p7/m, z16.d, z29.d
-    __ sve_fnmls(z9, __ D, p3, z2, z11);               //       fnmls   z9.d, p3/m, z2.d, z11.d
-    __ sve_mla(z3, __ S, p1, z1, z26);                 //       mla     z3.s, p1/m, z1.s, z26.s
-    __ sve_mls(z17, __ S, p3, z8, z17);                //       mls     z17.s, p3/m, z8.s, z17.s
-    __ sve_and(z24, z5, z19);                          //       and     z24.d, z5.d, z19.d
-    __ sve_eor(z17, z22, z16);                         //       eor     z17.d, z22.d, z16.d
-    __ sve_orr(z20, z19, z0);                          //       orr     z20.d, z19.d, z0.d
-    __ sve_bic(z17, z23, z4);                          //       bic     z17.d, z23.d, z4.d
-    __ sve_uzp1(z4, __ S, z23, z25);                   //       uzp1    z4.s, z23.s, z25.s
-    __ sve_uzp2(z2, __ H, z8, z8);                     //       uzp2    z2.h, z8.h, z8.h
+    __ sve_add(z20, __ D, z7, z4);                     //       add     z20.d, z7.d, z4.d
+    __ sve_sub(z7, __ S, z0, z8);                      //       sub     z7.s, z0.s, z8.s
+    __ sve_fadd(z19, __ D, z22, z4);                   //       fadd    z19.d, z22.d, z4.d
+    __ sve_fmul(z9, __ D, z22, z11);                   //       fmul    z9.d, z22.d, z11.d
+    __ sve_fsub(z5, __ S, z30, z16);                   //       fsub    z5.s, z30.s, z16.s
+    __ sve_abs(z22, __ H, p3, z1);                     //       abs     z22.h, p3/m, z1.h
+    __ sve_add(z8, __ D, p5, z16);                     //       add     z8.d, p5/m, z8.d, z16.d
+    __ sve_and(z15, __ S, p1, z4);                     //       and     z15.s, p1/m, z15.s, z4.s
+    __ sve_asr(z8, __ B, p1, z29);                     //       asr     z8.b, p1/m, z8.b, z29.b
+    __ sve_cnt(z28, __ D, p4, z29);                    //       cnt     z28.d, p4/m, z29.d
+    __ sve_eor(z9, __ H, p3, z2);                      //       eor     z9.h, p3/m, z9.h, z2.h
+    __ sve_lsl(z28, __ B, p0, z7);                     //       lsl     z28.b, p0/m, z28.b, z7.b
+    __ sve_lsr(z26, __ H, p5, z17);                    //       lsr     z26.h, p5/m, z26.h, z17.h
+    __ sve_mul(z8, __ D, p4, z21);                     //       mul     z8.d, p4/m, z8.d, z21.d
+    __ sve_neg(z5, __ S, p5, z21);                     //       neg     z5.s, p5/m, z21.s
+    __ sve_not(z22, __ S, p4, z29);                    //       not     z22.s, p4/m, z29.s
+    __ sve_orr(z19, __ S, p0, z4);                     //       orr     z19.s, p0/m, z19.s, z4.s
+    __ sve_smax(z23, __ B, p1, z19);                   //       smax    z23.b, p1/m, z23.b, z19.b
+    __ sve_smin(z23, __ B, p6, z19);                   //       smin    z23.b, p6/m, z23.b, z19.b
+    __ sve_sub(z8, __ D, p2, z14);                     //       sub     z8.d, p2/m, z8.d, z14.d
+    __ sve_fabs(z17, __ S, p7, z21);                   //       fabs    z17.s, p7/m, z21.s
+    __ sve_fadd(z30, __ D, p0, z10);                   //       fadd    z30.d, p0/m, z30.d, z10.d
+    __ sve_fdiv(z12, __ S, p0, z9);                    //       fdiv    z12.s, p0/m, z12.s, z9.s
+    __ sve_fmax(z24, __ D, p4, z4);                    //       fmax    z24.d, p4/m, z24.d, z4.d
+    __ sve_fmin(z6, __ D, p2, z27);                    //       fmin    z6.d, p2/m, z6.d, z27.d
+    __ sve_fmul(z13, __ D, p4, z30);                   //       fmul    z13.d, p4/m, z13.d, z30.d
+    __ sve_fneg(z22, __ D, p5, z30);                   //       fneg    z22.d, p5/m, z30.d
+    __ sve_frintm(z9, __ S, p3, z19);                  //       frintm  z9.s, p3/m, z19.s
+    __ sve_frintn(z20, __ S, p7, z9);                  //       frintn  z20.s, p7/m, z9.s
+    __ sve_frintp(z13, __ S, p3, z19);                 //       frintp  z13.s, p3/m, z19.s
+    __ sve_fsqrt(z24, __ S, p2, z19);                  //       fsqrt   z24.s, p2/m, z19.s
+    __ sve_fsub(z17, __ S, p4, z16);                   //       fsub    z17.s, p4/m, z17.s, z16.s
+    __ sve_fmad(z0, __ S, p0, z11, z7);                //       fmad    z0.s, p0/m, z11.s, z7.s
+    __ sve_fmla(z14, __ D, p4, z4, z15);               //       fmla    z14.d, p4/m, z4.d, z15.d
+    __ sve_fmls(z5, __ D, p0, z10, z21);               //       fmls    z5.d, p0/m, z10.d, z21.d
+    __ sve_fnmla(z3, __ D, p0, z9, z19);               //       fnmla   z3.d, p0/m, z9.d, z19.d
+    __ sve_fnmls(z10, __ S, p6, z3, z19);              //       fnmls   z10.s, p6/m, z3.s, z19.s
+    __ sve_mla(z23, __ H, p7, z13, z21);               //       mla     z23.h, p7/m, z13.h, z21.h
+    __ sve_mls(z26, __ S, p3, z17, z30);               //       mls     z26.s, p3/m, z17.s, z30.s
+    __ sve_and(z14, z2, z29);                          //       and     z14.d, z2.d, z29.d
+    __ sve_eor(z21, z20, z7);                          //       eor     z21.d, z20.d, z7.d
+    __ sve_orr(z2, z1, z26);                           //       orr     z2.d, z1.d, z26.d
+    __ sve_bic(z9, z16, z17);                          //       bic     z9.d, z16.d, z17.d
+    __ sve_uzp1(z0, __ D, z4, z2);                     //       uzp1    z0.d, z4.d, z2.d
+    __ sve_uzp2(z14, __ S, z6, z11);                   //       uzp2    z14.s, z6.s, z11.s
 
 // SVEReductionOp
-    __ sve_andv(v24, __ S, p4, z30);                   //       andv s24, p4, z30.s
-    __ sve_orv(v4, __ H, p7, z1);                      //       orv h4, p7, z1.h
-    __ sve_eorv(v19, __ H, p3, z0);                    //       eorv h19, p3, z0.h
-    __ sve_smaxv(v7, __ B, p6, z17);                   //       smaxv b7, p6, z17.b
-    __ sve_sminv(v27, __ D, p1, z9);                   //       sminv d27, p1, z9.d
-    __ sve_fminv(v23, __ D, p3, z16);                  //       fminv d23, p3, z16.d
-    __ sve_fmaxv(v22, __ D, p5, z20);                  //       fmaxv d22, p5, z20.d
-    __ sve_fadda(v28, __ D, p2, z13);                  //       fadda d28, p2, d28, z13.d
-    __ sve_uaddv(v7, __ H, p5, z28);                   //       uaddv d7, p5, z28.h
+    __ sve_andv(v14, __ H, p4, z29);                   //       andv h14, p4, z29.h
+    __ sve_orv(v3, __ H, p0, z22);                     //       orv h3, p0, z22.h
+    __ sve_eorv(v3, __ B, p6, z27);                    //       eorv b3, p6, z27.b
+    __ sve_smaxv(v19, __ D, p5, z7);                   //       smaxv d19, p5, z7.d
+    __ sve_sminv(v21, __ H, p3, z5);                   //       sminv h21, p3, z5.h
+    __ sve_fminv(v25, __ D, p1, z21);                  //       fminv d25, p1, z21.d
+    __ sve_fmaxv(v17, __ S, p0, z3);                   //       fmaxv s17, p0, z3.s
+    __ sve_fadda(v19, __ S, p3, z7);                   //       fadda s19, p3, s19, z7.s
+    __ sve_uaddv(v14, __ H, p4, z17);                  //       uaddv d14, p4, z17.h
 
     __ bind(forth);
 
@@ -1088,30 +1130,30 @@
     0x9101a1a0,     0xb10a5cc8,     0xd10810aa,     0xf10fd061,
     0x120cb166,     0x321764bc,     0x52174681,     0x720c0227,
     0x9241018e,     0xb25a2969,     0xd278b411,     0xf26aad01,
-    0x14000000,     0x17ffffd7,     0x1400037a,     0x94000000,
-    0x97ffffd4,     0x94000377,     0x3400000a,     0x34fffa2a,
-    0x34006e8a,     0x35000008,     0x35fff9c8,     0x35006e28,
-    0xb400000b,     0xb4fff96b,     0xb4006dcb,     0xb500001d,
-    0xb5fff91d,     0xb5006d7d,     0x10000013,     0x10fff8b3,
-    0x10006d13,     0x90000013,     0x36300016,     0x3637f836,
-    0x36306c96,     0x3758000c,     0x375ff7cc,     0x37586c2c,
+    0x14000000,     0x17ffffd7,     0x14000398,     0x94000000,
+    0x97ffffd4,     0x94000395,     0x3400000a,     0x34fffa2a,
+    0x3400724a,     0x35000008,     0x35fff9c8,     0x350071e8,
+    0xb400000b,     0xb4fff96b,     0xb400718b,     0xb500001d,
+    0xb5fff91d,     0xb500713d,     0x10000013,     0x10fff8b3,
+    0x100070d3,     0x90000013,     0x36300016,     0x3637f836,
+    0x36307056,     0x3758000c,     0x375ff7cc,     0x37586fec,
     0x128313a0,     0x528a32c7,     0x7289173b,     0x92ab3acc,
     0xd2a0bf94,     0xf2c285e8,     0x9358722f,     0x330e652f,
     0x53067f3b,     0x93577c53,     0xb34a1aac,     0xd35a4016,
     0x13946c63,     0x93c3dbc8,     0x54000000,     0x54fff5a0,
-    0x54006a00,     0x54000001,     0x54fff541,     0x540069a1,
-    0x54000002,     0x54fff4e2,     0x54006942,     0x54000002,
-    0x54fff482,     0x540068e2,     0x54000003,     0x54fff423,
-    0x54006883,     0x54000003,     0x54fff3c3,     0x54006823,
-    0x54000004,     0x54fff364,     0x540067c4,     0x54000005,
-    0x54fff305,     0x54006765,     0x54000006,     0x54fff2a6,
-    0x54006706,     0x54000007,     0x54fff247,     0x540066a7,
-    0x54000008,     0x54fff1e8,     0x54006648,     0x54000009,
-    0x54fff189,     0x540065e9,     0x5400000a,     0x54fff12a,
-    0x5400658a,     0x5400000b,     0x54fff0cb,     0x5400652b,
-    0x5400000c,     0x54fff06c,     0x540064cc,     0x5400000d,
-    0x54fff00d,     0x5400646d,     0x5400000e,     0x54ffefae,
-    0x5400640e,     0x5400000f,     0x54ffef4f,     0x540063af,
+    0x54006dc0,     0x54000001,     0x54fff541,     0x54006d61,
+    0x54000002,     0x54fff4e2,     0x54006d02,     0x54000002,
+    0x54fff482,     0x54006ca2,     0x54000003,     0x54fff423,
+    0x54006c43,     0x54000003,     0x54fff3c3,     0x54006be3,
+    0x54000004,     0x54fff364,     0x54006b84,     0x54000005,
+    0x54fff305,     0x54006b25,     0x54000006,     0x54fff2a6,
+    0x54006ac6,     0x54000007,     0x54fff247,     0x54006a67,
+    0x54000008,     0x54fff1e8,     0x54006a08,     0x54000009,
+    0x54fff189,     0x540069a9,     0x5400000a,     0x54fff12a,
+    0x5400694a,     0x5400000b,     0x54fff0cb,     0x540068eb,
+    0x5400000c,     0x54fff06c,     0x5400688c,     0x5400000d,
+    0x54fff00d,     0x5400682d,     0x5400000e,     0x54ffefae,
+    0x540067ce,     0x5400000f,     0x54ffef4f,     0x5400676f,
     0xd40658e1,     0xd4014d22,     0xd4046543,     0xd4273f60,
     0xd44cad80,     0xd503201f,     0xd69f03e0,     0xd6bf03e0,
     0xd5033fdf,     0xd5033e9f,     0xd50332bf,     0xd61f0200,
@@ -1143,7 +1185,7 @@
     0x791f226d,     0xf95aa2f3,     0xb9587bb7,     0x395f7176,
     0x795d9143,     0x399e7e08,     0x799a2697,     0x79df3422,
     0xb99c2624,     0xfd5c2374,     0xbd5fa1d9,     0xfd1d595a,
-    0xbd1b1869,     0x580053fb,     0x1800000b,     0xf8945060,
+    0xbd1b1869,     0x580057bb,     0x1800000b,     0xf8945060,
     0xd8000000,     0xf8ae6ba0,     0xf99a0080,     0x1a070035,
     0x3a0700a8,     0x5a0e0367,     0x7a11009b,     0x9a000380,
     0xba1e030c,     0xda0f0320,     0xfa030301,     0x0b340b11,
@@ -1297,20 +1339,27 @@
     0xb8702320,     0xb87a3057,     0xb870508c,     0xb87c43be,
     0xb87070db,     0xb86961fd,     0xce273c87,     0xce080ac9,
     0xce7e8e9b,     0xce808b45,     0xce79806e,     0xce758768,
-    0xcec0835a,     0xce608ad8,     0x043100c4,     0x046105e3,
-    0x65c900a6,     0x65d60a87,     0x65c80545,     0x0416a63e,
-    0x04001f8b,     0x045a179a,     0x04d09e0d,     0x045aa1a5,
-    0x04990b4f,     0x049386cb,     0x04918264,     0x04500dd1,
-    0x0497b062,     0x041ea4d7,     0x04980f71,     0x04c80450,
-    0x048a04c3,     0x04810d93,     0x04dcba68,     0x65808ae0,
-    0x65cd9db3,     0x658680e6,     0x65879911,     0x65c296d6,
-    0x04dda1e2,     0x65c2a494,     0x65c0a107,     0x65c1b493,
-    0x65cdb569,     0x65819e05,     0x65ad8c36,     0x65af1334,
-    0x65e63104,     0x65fd5e04,     0x65eb6c49,     0x049a4423,
-    0x04916d11,     0x043330b8,     0x04b032d1,     0x04603274,
-    0x04e432f1,     0x05b96ae4,     0x05686d02,     0x049a33d8,
-    0x04583c24,     0x04592c13,     0x04083a27,     0x04ca253b,
-    0x65c72e17,     0x65c63696,     0x65d829bc,     0x04413787,
-
+    0xcec0835a,     0xce608ad8,     0x2520d264,     0x2521cf80,
+    0x058074c1,     0x054242c9,     0x05004476,     0x25a0df08,
+    0x25a1c206,     0x0583288b,     0x05401c3a,     0x05027e8d,
+    0x2520ce05,     0x25a1cb0a,     0x0580989a,     0x0540e096,
+    0x0500fb73,     0x2560c2ce,     0x2521d590,     0x05803e97,
+    0x05400d31,     0x05003ed0,     0x2520c623,     0x25a1cdd1,
+    0x058052ac,     0x0540ba33,     0x05003ed7,     0x25a0c6cd,
+    0x2521cf00,     0x0583c5b1,     0x05407336,     0x05001e62,
+    0x04e400f4,     0x04a80407,     0x65c402d3,     0x65cb0ac9,
+    0x659007c5,     0x0456ac36,     0x04c01608,     0x049a048f,
+    0x041087a8,     0x04dab3bc,     0x04590c49,     0x041380fc,
+    0x0451963a,     0x04d012a8,     0x0497b6a5,     0x049eb3b6,
+    0x04980093,     0x04080677,     0x040a1a77,     0x04c109c8,
+    0x049cbeb1,     0x65c0815e,     0x658d812c,     0x65c69098,
+    0x65c78b66,     0x65c293cd,     0x04ddb7d6,     0x6582ae69,
+    0x6580bd34,     0x6581ae6d,     0x658daa78,     0x65819211,
+    0x65a78160,     0x65ef108e,     0x65f52145,     0x65f34123,
+    0x65b3786a,     0x04555db7,     0x049e6e3a,     0x043d304e,
+    0x04a73295,     0x047a3022,     0x04f13209,     0x05e26880,
+    0x05ab6cce,     0x045a33ae,     0x045822c3,     0x04193b63,
+    0x04c834f3,     0x044a2cb5,     0x65c726b9,     0x65862071,
+    0x65982cf3,     0x0441322e,
   };
 // END  Generated code -- do not edit
diff --git a/test/hotspot/jtreg/compiler/codegen/TestByteVect.java b/test/hotspot/jtreg/compiler/codegen/TestByteVect.java
index 385ba2dc8ea..73a2028ac7c 100644
--- a/test/hotspot/jtreg/compiler/codegen/TestByteVect.java
+++ b/test/hotspot/jtreg/compiler/codegen/TestByteVect.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@ public class TestByteVect {
   static int test() {
     byte[] a1 = new byte[ARRLEN];
     byte[] a2 = new byte[ARRLEN];
+    byte[] a3 = new byte[ARRLEN];
     System.out.println("Warmup");
     for (int i=0; i<ITERS; i++) {
       test_ci(a1);
@@ -94,6 +95,16 @@ public class TestByteVect {
       test_cp_unalnsrc(a1, a2);
       test_2ci_unaln(a1, a2);
       test_2vi_unaln(a1, a2, (byte)123, (byte)103);
+      test_addImm127(a1, a2);
+      test_addImm(a1, a2, a3);
+      test_addImm256(a1, a2);
+      test_addImmNeg128(a1, a2);
+      test_addImmNeg129(a1, a2);
+      test_subImm(a1, a2, a3);
+      test_andImm21(a1, a2);
+      test_andImm7(a1, a2);
+      test_orImm(a1, a2);
+      test_xorImm(a1, a2, a3);
     }
     // Initialize
     for (int i=0; i<ARRLEN; i++) {
@@ -487,6 +498,77 @@ public class TestByteVect {
       for (int i=ARRLEN-UNALIGN_OFF; i<ARRLEN; i++) {
         errn += verify("test_2vi_unaln_overlap: a1", i, a1[i], (byte)103);
       }
+      byte base = (byte) 10;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (byte) 10;
+      }
+      byte golden = (byte)(base + 127);
+      test_addImm127(a1, a2);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm127: a2", i, a2[i], golden);
+      }
+      test_addImm(a1, a2, a3);
+      golden = (byte)(base + 8);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a2", i, a2[i], golden);
+      }
+      golden = (byte) (base + 255);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a3", i, a3[i], golden);
+      }
+      test_addImm256(a1, a2);
+      golden = (byte)(base + 256);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm256: a2", i, a2[i], golden);
+      }
+      test_addImmNeg128(a1, a2);
+      golden = (byte)(base + (-128));
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImmNeg128: a2", i, a2[i], golden);
+      }
+      test_addImmNeg129(a1, a2);
+      golden = (byte)(base + (-129));
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImmNeg129: a2", i, a2[i], golden);
+      }
+      // Reset for sub test
+      base = (byte) 120;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (byte) 120;
+      }
+      test_subImm(a1, a2, a3);
+      golden = (byte) (base - 56);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a2", i, a2[i], golden);
+      }
+      golden = (byte) (base - 256);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a3", i, a3[i], golden);
+      }
+      test_andImm21(a1, a2);
+      golden = (byte) (base & 21);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm21: a2", i, a2[i], golden);
+      }
+      test_andImm7(a1, a2);
+      golden = (byte) (base & 7);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm7: a2", i, a2[i], golden);
+      }
+      test_orImm(a1, a2);
+      golden = (byte) (base | 3);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_orImm: a2", i, a2[i], golden);
+      }
+      test_xorImm(a1, a2, a3);
+      golden = (byte) (base ^ 127);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a2", i, a2[i], golden);
+      }
+      golden = (byte) (base ^ 255);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a3", i, a3[i], golden);
+      }
 
     }
 
@@ -730,6 +812,59 @@ public class TestByteVect {
     }
     end = System.currentTimeMillis();
     System.out.println("test_2vi_unaln: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm127(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm127: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm256(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm256: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImmNeg128(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImmNeg128: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImmNeg129(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImmNeg129: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_andImm7(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_andImm7: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_orImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_orImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_xorImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis(); System.out.println("test_xorImm: " + (end - start));
 
     return errn;
   }
@@ -945,6 +1080,59 @@ public class TestByteVect {
       b[i+UNALIGN_OFF] = d;
     }
   }
+  static void test_addImm127(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] + 127);
+    }
+  }
+  static void test_addImm(byte[] a, byte[] b, byte[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] + 8);
+      c[i] = (byte) (a[i] + 255);
+    }
+  }
+  static void test_addImm256(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] + 256);
+    }
+  }
+  static void test_addImmNeg128(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] + (-128));
+    }
+  }
+  static void test_addImmNeg129(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] + (-129));
+    }
+  }
+  static void test_subImm(byte[] a, byte[] b, byte[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] - 56);
+      c[i] = (byte) (a[i] - 256);
+    }
+  }
+  static void test_andImm21(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] & 21);
+    }
+  }
+  static void test_andImm7(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] & 7);
+    }
+  }
+  static void test_orImm(byte[] a, byte[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] | 3);
+    }
+  }
+  static void test_xorImm(byte[] a, byte[] b, byte[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (byte) (a[i] ^ 127);
+      c[i] = (byte) (a[i] ^ 255);
+    }
+  }
 
   static int verify(String text, int i, byte elem, byte val) {
     if (elem != val) {
diff --git a/test/hotspot/jtreg/compiler/codegen/TestCharVect.java b/test/hotspot/jtreg/compiler/codegen/TestCharVect.java
index 1cef590c15b..cd5d5df82f6 100644
--- a/test/hotspot/jtreg/compiler/codegen/TestCharVect.java
+++ b/test/hotspot/jtreg/compiler/codegen/TestCharVect.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@ public class TestCharVect {
   static int test() {
     char[] a1 = new char[ARRLEN];
     char[] a2 = new char[ARRLEN];
+    char[] a3 = new char[ARRLEN];
     System.out.println("Warmup");
     for (int i=0; i<ITERS; i++) {
       test_ci(a1);
@@ -94,6 +95,13 @@ public class TestCharVect {
       test_cp_unalnsrc(a1, a2);
       test_2ci_unaln(a1, a2);
       test_2vi_unaln(a1, a2, (char)123, (char)103);
+      test_addImm129(a1, a2);
+      test_addImm(a1, a2, a3);
+      test_subImm56(a1, a2);
+      test_subImm256(a1, a2);
+      test_andImm(a1, a2);
+      test_orImm(a1, a2);
+      test_xorImm(a1, a2);
     }
     // Initialize
     for (int i=0; i<ARRLEN; i++) {
@@ -487,6 +495,56 @@ public class TestCharVect {
       for (int i=ARRLEN-UNALIGN_OFF; i<ARRLEN; i++) {
         errn += verify("test_2vi_unaln_overlap: a1", i, a1[i], (char)103);
       }
+      // Reset for binary operation with immediate.
+      char base = (char) 3;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (char) 3;
+      }
+      char golden = (char)(base + 129);
+      test_addImm129(a1, a2);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm129: a2", i, a2[i], golden);
+      }
+      test_addImm(a1, a2, a3);
+      golden = (char)(base + 129);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a2", i, a2[i], golden);
+      }
+      golden = (char) (base + 255);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a3", i, a3[i], golden);
+      }
+      // Reset for sub operation test.
+      base = (char) 120;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (char) 120;
+      }
+      test_subImm56(a1, a2);
+      golden = (char) (base - 56);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm56: a2", i, a2[i], golden);
+      }
+      test_subImm256(a1, a2);
+      golden = (char) (base - 256);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm256: a2", i, a2[i], golden);
+      }
+      test_andImm(a1, a2);
+      golden = (char) (base & 0xfe);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm: a2", i, a2[i], golden);
+      }
+      test_orImm(a1, a2);
+      golden = (char) (base | 0xff);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_orImm: a2", i, a2[i], golden);
+      }
+      test_xorImm(a1, a2);
+      golden = (char) (base ^ 0xc7);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a2", i, a2[i], golden);
+      }
+
 
     }
 
@@ -730,6 +788,49 @@ public class TestCharVect {
     }
     end = System.currentTimeMillis();
     System.out.println("test_2vi_unaln: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm129(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm129: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm56(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm56: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm256(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm256: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_andImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_andImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_orImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_orImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_xorImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_xorImm: " + (end - start));
+
 
     return errn;
   }
@@ -945,6 +1046,47 @@ public class TestCharVect {
       b[i+UNALIGN_OFF] = d;
     }
   }
+  static void test_addImm129(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] + 129);
+    }
+  }
+  static void test_addImm(char[] a, char[] b, char[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] + 129);
+      c[i] = (char) (a[i] + 255);
+    }
+  }
+
+  static void test_subImm56(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] - 56);
+    }
+  }
+
+  static void test_subImm256(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] - 256);
+    }
+  }
+
+  static void test_andImm(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] & 0xfe);
+    }
+  }
+
+  static void test_orImm(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] | 0xff);
+    }
+  }
+
+  static void test_xorImm(char[] a, char[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (char) (a[i] ^ 0xc7);
+    }
+  }
 
   static int verify(String text, int i, char elem, char val) {
     if (elem != val) {
diff --git a/test/hotspot/jtreg/compiler/codegen/TestIntVect.java b/test/hotspot/jtreg/compiler/codegen/TestIntVect.java
index 1eb4a980f4b..f4040aafbbf 100644
--- a/test/hotspot/jtreg/compiler/codegen/TestIntVect.java
+++ b/test/hotspot/jtreg/compiler/codegen/TestIntVect.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@ public class TestIntVect {
   static int test() {
     int[] a1 = new int[ARRLEN];
     int[] a2 = new int[ARRLEN];
+    int[] a3 = new int[ARRLEN];
     System.out.println("Warmup");
     for (int i=0; i<ITERS; i++) {
       test_ci(a1);
@@ -94,6 +95,13 @@ public class TestIntVect {
       test_cp_unalnsrc(a1, a2);
       test_2ci_unaln(a1, a2);
       test_2vi_unaln(a1, a2, (int)123, (int)103);
+      test_addImm127(a1, a2);
+      test_addImm(a1, a2, a3);
+      test_addImm256(a1, a2);
+      test_subImm(a1, a2, a3);
+      test_andImm(a1, a2, a3);
+      test_orImm(a1, a2);
+      test_xorImm(a1, a2);
     }
     // Initialize
     for (int i=0; i<ARRLEN; i++) {
@@ -487,6 +495,63 @@ public class TestIntVect {
       for (int i=ARRLEN-UNALIGN_OFF; i<ARRLEN; i++) {
         errn += verify("test_2vi_unaln_overlap: a1", i, a1[i], (int)103);
       }
+      // Reset for binary operation with immediate
+      int base = 10;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = 10;
+      }
+      int golden = base + 127;
+      test_addImm127(a1, a2);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm127: a2", i, a2[i], golden);
+      }
+      test_addImm(a1, a2, a3);
+      golden = base + 127;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a2", i, a2[i], golden);
+      }
+      golden = base + 255;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a3", i, a3[i], golden);
+      }
+      test_addImm256(a1, a2);
+      golden = base + 256;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm256: a2", i, a2[i], golden);
+      }
+      // Reset for sub test
+      base = 10000;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = 10000;
+      }
+      test_subImm(a1, a2, a3);
+      golden = base - 2304;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a2", i, a2[i], golden);
+      }
+      golden = base - 65280;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a3", i, a3[i], golden);
+      }
+      test_andImm(a1, a2, a3);
+      golden = base + 2560;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm: a2", i, a2[i], golden);
+      }
+      golden = base & 516096;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm: a3", i, a3[i], golden);
+      }
+      test_orImm(a1, a2);
+      golden = base | 8257536;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_orImm: a2", i, a2[i], golden);
+      }
+      test_xorImm(a1, a2);
+      golden = base ^ 2032;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a2", i, a2[i], golden);
+      }
 
     }
 
@@ -730,6 +795,48 @@ public class TestIntVect {
     }
     end = System.currentTimeMillis();
     System.out.println("test_2vi_unaln: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm127(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm127: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm256(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm256: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_andImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_andImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_orImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_orImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_xorImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_xorImm: " + (end - start));
 
     return errn;
   }
@@ -945,6 +1052,50 @@ public class TestIntVect {
       b[i+UNALIGN_OFF] = d;
     }
   }
+  static void test_addImm127(int[] a, int[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] + 127;
+    }
+  }
+  static void test_addImm(int[] a, int[] b, int[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] + 127;
+      c[i] = a[i] + 255;
+    }
+  }
+  static void test_addImm256(int[] a, int[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] + 256;
+    }
+  }
+  static void test_subImm(int[] a, int[] b, int[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] - 2304;
+      c[i] = a[i] - 65280;
+    }
+  }
+  static void test_andImm21(int[] a, int[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] & 21;
+    }
+  }
+  static void test_andImm(int[] a, int[] b, int[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] + 2560;
+      c[i] = a[i] & 516096;
+    }
+  }
+  static void test_orImm(int[] a, int[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] | 8257536;
+    }
+  }
+  static void test_xorImm(int[] a, int[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] ^ 2032;
+    }
+  }
+
 
   static int verify(String text, int i, int elem, int val) {
     if (elem != val) {
diff --git a/test/hotspot/jtreg/compiler/codegen/TestLongVect.java b/test/hotspot/jtreg/compiler/codegen/TestLongVect.java
index 512c2d6d13f..55deee514cb 100644
--- a/test/hotspot/jtreg/compiler/codegen/TestLongVect.java
+++ b/test/hotspot/jtreg/compiler/codegen/TestLongVect.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@ public class TestLongVect {
   static int test() {
     long[] a1 = new long[ARRLEN];
     long[] a2 = new long[ARRLEN];
+    long[] a3 = new long[ARRLEN];
     System.out.println("Warmup");
     for (int i=0; i<ITERS; i++) {
       test_ci(a1);
@@ -94,6 +95,12 @@ public class TestLongVect {
       test_cp_unalnsrc(a1, a2);
       test_2ci_unaln(a1, a2);
       test_2vi_unaln(a1, a2, (long)123, (long)103);
+      test_addImm(a1, a2, a3);
+      test_subImm(a1, a2, a3);
+      test_subImm256(a1, a2);
+      test_andImm(a1, a2);
+      test_orImm(a1, a2);
+      test_xorImm(a1, a2);
     }
     // Initialize
     for (int i=0; i<ARRLEN; i++) {
@@ -487,6 +494,53 @@ public class TestLongVect {
       for (int i=ARRLEN-UNALIGN_OFF; i<ARRLEN; i++) {
         errn += verify("test_2vi_unaln_overlap: a1", i, a1[i], (long)103);
       }
+      // Reset for binary operations with immediate.
+      for (int i=0; i<ARRLEN; i++) {
+        a1[i] = 10;
+      }
+      long base = 10;
+      test_addImm(a1, a2, a3);
+      long golden = base & 516097;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a2", i, a2[i], golden);
+      }
+      golden = base + 65280;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a3", i, a3[i], golden);
+      }
+      base = 120;
+      for (int i=0; i<ARRLEN; i++) {
+        a1[i] = 120;
+      }
+      test_subImm(a1, a2, a3);
+      golden = base + 65535;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a2", i, a2[i], golden);
+      }
+      golden = base - 2147483647;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm: a3", i, a3[i], golden);
+      }
+      test_subImm256(a1, a2);
+      golden = base - 256;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm256: a2", i, a2[i], golden);
+      }
+      test_andImm(a1, a2);
+      golden = base & 132120576;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm: a2", i, a2[i], golden);
+      }
+      test_orImm(a1, a2);
+      golden = base | 2113929216;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_orImm: a2", i, a2[i], golden);
+      }
+      test_xorImm(a1, a2);
+      golden = base ^ 516096;
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a2", i, a2[i], golden);
+      }
 
     }
 
@@ -730,6 +784,42 @@ public class TestLongVect {
     }
     end = System.currentTimeMillis();
     System.out.println("test_2vi_unaln: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm256(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm256: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_andImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_andImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_orImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_orImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_xorImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_xorImm: " + (end - start));
 
     return errn;
   }
@@ -946,6 +1036,44 @@ public class TestLongVect {
     }
   }
 
+  static void test_addImm(long[] a, long[] b, long[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] & 516097;
+      c[i] = a[i] + 65280;
+    }
+  }
+
+  static void test_subImm(long[] a, long[] b, long[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] + 65535;
+      c[i] = a[i] - 2147483647;
+    }
+  }
+
+  static void test_subImm256(long[] a, long[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] - 256;
+    }
+  }
+
+  static void test_andImm(long[] a, long[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] & 132120576;
+    }
+  }
+
+  static void test_orImm(long[] a, long[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] | 2113929216;
+    }
+  }
+
+  static void test_xorImm(long[] a, long[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = a[i] ^ 516096;
+    }
+  }
+
   static int verify(String text, int i, long elem, long val) {
     if (elem != val) {
       System.err.println(text + "[" + i + "] = " + elem + " != " + val);
diff --git a/test/hotspot/jtreg/compiler/codegen/TestShortVect.java b/test/hotspot/jtreg/compiler/codegen/TestShortVect.java
index 03c0c9f8c36..0fbb8a0631a 100644
--- a/test/hotspot/jtreg/compiler/codegen/TestShortVect.java
+++ b/test/hotspot/jtreg/compiler/codegen/TestShortVect.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,7 @@ public class TestShortVect {
   static int test() {
     short[] a1 = new short[ARRLEN];
     short[] a2 = new short[ARRLEN];
+    short[] a3 = new short[ARRLEN];
     System.out.println("Warmup");
     for (int i=0; i<ITERS; i++) {
       test_ci(a1);
@@ -94,6 +95,13 @@ public class TestShortVect {
       test_cp_unalnsrc(a1, a2);
       test_2ci_unaln(a1, a2);
       test_2vi_unaln(a1, a2, (short)123, (short)103);
+      test_addImm129(a1, a2);
+      test_addImm(a1, a2, a3);
+      test_subImm56(a1, a2);
+      test_subImm256(a1, a2);
+      test_andImm(a1, a2);
+      test_orImm(a1, a2);
+      test_xorImm(a1, a2);
     }
     // Initialize
     for (int i=0; i<ARRLEN; i++) {
@@ -487,6 +495,54 @@ public class TestShortVect {
       for (int i=ARRLEN-UNALIGN_OFF; i<ARRLEN; i++) {
         errn += verify("test_2vi_unaln_overlap: a1", i, a1[i], (short)103);
       }
+      short base = (short) 3;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (short) 3;
+      }
+      short golden = (short)(base + 129);
+      test_addImm129(a1, a2);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm129: a2", i, a2[i], golden);
+      }
+      test_addImm(a1, a2, a3);
+      golden = (short)(base + 129);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a2", i, a2[i], golden);
+      }
+      golden = (short) (base + 255);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_addImm: a3", i, a3[i], golden);
+      }
+      // Reset for sub test
+      base = (short) 120;
+      for (int i = 0; i < ARRLEN; i++) {
+        a1[i] = (short) 120;
+      }
+      test_subImm56(a1, a2);
+      golden = (short) (base - 56);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm56: a2", i, a2[i], golden);
+      }
+      test_subImm256(a1, a2);
+      golden = (short) (base - 256);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_subImm256: a2", i, a2[i], golden);
+      }
+      test_andImm(a1, a2);
+      golden = (short) (base & 0xfe);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_andImm: a2", i, a2[i], golden);
+      }
+      test_orImm(a1, a2);
+      golden = (short) (base | 0xff);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_orImm: a2", i, a2[i], golden);
+      }
+      test_xorImm(a1, a2);
+      golden = (short) (base ^ 0xc7);
+      for (int i=0; i<ARRLEN; i++) {
+        errn += verify("test_xorImm: a2", i, a2[i], golden);
+      }
 
     }
 
@@ -730,6 +786,48 @@ public class TestShortVect {
     }
     end = System.currentTimeMillis();
     System.out.println("test_2vi_unaln: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm129(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm129: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_addImm(a1, a2, a3);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_addImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm56(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm56: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_subImm256(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_subImm256: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_andImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_andImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_orImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_orImm: " + (end - start));
+    start = System.currentTimeMillis();
+    for (int i=0; i<ITERS; i++) {
+      test_xorImm(a1, a2);
+    }
+    end = System.currentTimeMillis();
+    System.out.println("test_xorImm: " + (end - start));
 
     return errn;
   }
@@ -945,6 +1043,43 @@ public class TestShortVect {
       b[i+UNALIGN_OFF] = d;
     }
   }
+  static void test_addImm129(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] + 129);
+    }
+  }
+
+  static void test_addImm(short[] a, short[] b, short[] c) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] + 129);
+      c[i] = (short) (a[i] + 255);
+    }
+  }
+  static void test_subImm56(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] - 56);
+    }
+  }
+  static void test_subImm256(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] - 256);
+    }
+  }
+  static void test_andImm(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] & 0xfe);
+    }
+  }
+  static void test_orImm(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] | 0xff);
+    }
+  }
+  static void test_xorImm(short[] a, short[] b) {
+    for (int i = 0; i < a.length; i++) {
+      b[i] = (short) (a[i] ^ 0xc7);
+    }
+  }
 
   static int verify(String text, int i, short elem, short val) {
     if (elem != val) {