8297730: C2: Arraycopy intrinsic throws incorrect exception

Reviewed-by: thartmann, kvn
Christian Hagedorn 2023-01-25 07:22:12 +00:00
parent 7465de453a
commit 5a478ef775
3 changed files with 434 additions and 56 deletions
src/hotspot/share/opto
test/hotspot/jtreg/compiler/arraycopy
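
For context, the scenario this change addresses, distilled from the new regression test added below (a minimal sketch; the method mirrors testNegativeSize() in the test): C2 recognizes a tightly coupled array allocation feeding System.arraycopy() and moves the allocation below the emitted arraycopy guards. If an unrelated uncommon trap sitting between the allocation and the copy is then hit, execution used to resume in the interpreter after the allocation, so the NegativeArraySizeException required by Java semantics was replaced by a later exception (here a NullPointerException).

    // Distilled from testNegativeSize() in the new test; byArr is passed as null.
    static byte[] testNegativeSize(byte[] byArr) {
        byte[] b = new byte[-1];               // must throw NegativeArraySizeException
        int len = byArr.length;                // unrelated null-check trap between allocation and copy
        System.arraycopy(byArr, 0, b, 0, len); // arraycopy intrinsic with tightly coupled allocation of b
        return b;
    }
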

@@ -4917,24 +4917,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
}
if (no_interfering_store) {
JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
uint size = alloc->req();
SafePointNode* sfpt = new SafePointNode(size, old_jvms);
old_jvms->set_map(sfpt);
for (uint i = 0; i < size; i++) {
sfpt->init_req(i, alloc->in(i));
}
// re-push array length for deoptimization
sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
old_jvms->set_sp(old_jvms->sp()+1);
old_jvms->set_monoff(old_jvms->monoff()+1);
old_jvms->set_scloff(old_jvms->scloff()+1);
old_jvms->set_endoff(old_jvms->endoff()+1);
old_jvms->set_should_reexecute(true);
sfpt->set_i_o(map()->i_o());
sfpt->set_memory(map()->memory());
sfpt->set_control(map()->control());
SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
JVMState* saved_jvms = jvms();
saved_reexecute_sp = _reexecute_sp;
@@ -4949,6 +4932,30 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
return NULL;
}
// Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack
// such that uncommon traps can be emitted to re-execute the array allocation in the interpreter.
SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const {
JVMState* old_jvms = alloc->jvms()->clone_shallow(C);
uint size = alloc->req();
SafePointNode* sfpt = new SafePointNode(size, old_jvms);
old_jvms->set_map(sfpt);
for (uint i = 0; i < size; i++) {
sfpt->init_req(i, alloc->in(i));
}
// re-push array length for deoptimization
sfpt->ins_req(old_jvms->stkoff() + old_jvms->sp(), alloc->in(AllocateNode::ALength));
old_jvms->set_sp(old_jvms->sp()+1);
old_jvms->set_monoff(old_jvms->monoff()+1);
old_jvms->set_scloff(old_jvms->scloff()+1);
old_jvms->set_endoff(old_jvms->endoff()+1);
old_jvms->set_should_reexecute(true);
sfpt->set_i_o(map()->i_o());
sfpt->set_memory(map()->memory());
sfpt->set_control(map()->control());
return sfpt;
}
// In case of a deoptimization, we restart execution at the
// allocation, allocating a new array. We would leave an uninitialized
// array in the heap that GCs wouldn't expect. Move the allocation
@@ -4956,18 +4963,20 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc
// deoptimize. This is possible because tightly_coupled_allocation()
// guarantees there's no observer of the allocated array at this point
// and the control flow is simple enough.
void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms,
void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards,
int saved_reexecute_sp, uint new_idx) {
if (saved_jvms != NULL && !stopped()) {
if (saved_jvms_before_guards != NULL && !stopped()) {
replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards);
assert(alloc != NULL, "only with a tightly coupled allocation");
// restore JVM state to the state at the arraycopy
saved_jvms->map()->set_control(map()->control());
assert(saved_jvms->map()->memory() == map()->memory(), "memory state changed?");
assert(saved_jvms->map()->i_o() == map()->i_o(), "IO state changed?");
saved_jvms_before_guards->map()->set_control(map()->control());
assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?");
assert(saved_jvms_before_guards->map()->i_o() == map()->i_o(), "IO state changed?");
// If we've improved the types of some nodes (null check) while
// emitting the guards, propagate them to the current state
map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
set_jvms(saved_jvms);
map()->replaced_nodes().apply(saved_jvms_before_guards->map(), new_idx);
set_jvms(saved_jvms_before_guards);
_reexecute_sp = saved_reexecute_sp;
// Remove the allocation from above the guards
@@ -5043,6 +5052,58 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No
}
}
// Unrelated UCTs between the array allocation and the array copy, which are considered safe by tightly_coupled_allocation(),
// need to be replaced by a UCT with a state before the array allocation (including the array length). This is necessary
// because we could hit one of these UCTs (which are executed before the emitted array copy guards and before the actual array
// allocation, which is moved down in arraycopy_move_allocation_here()). When later resuming execution in the interpreter,
// we would have wrongly skipped the array allocation. To prevent this, we resume execution at the array allocation in
// the interpreter, similar to what we do for the newly emitted guards for the array copy.
void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc,
JVMState* saved_jvms_before_guards) {
if (saved_jvms_before_guards->map()->control()->is_IfProj()) {
// There is at least one unrelated uncommon trap which needs to be replaced.
SafePointNode* sfpt = create_safepoint_with_state_before_array_allocation(alloc);
JVMState* saved_jvms = jvms();
const int saved_reexecute_sp = _reexecute_sp;
set_jvms(sfpt->jvms());
_reexecute_sp = jvms()->sp();
replace_unrelated_uncommon_traps_with_alloc_state(saved_jvms_before_guards);
// Restore state
set_jvms(saved_jvms);
_reexecute_sp = saved_reexecute_sp;
}
}
// Replace the unrelated uncommon traps with new uncommon trap nodes by reusing the action and reason. The new uncommon
// traps will have the state of the array allocation. Let the old uncommon trap nodes die.
void LibraryCallKit::replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards) {
Node* if_proj = saved_jvms_before_guards->map()->control(); // Start the search right before the newly emitted guards
while (if_proj->is_IfProj()) {
CallStaticJavaNode* uncommon_trap = get_uncommon_trap_from_success_proj(if_proj);
if (uncommon_trap != nullptr) {
create_new_uncommon_trap(uncommon_trap);
}
assert(if_proj->in(0)->is_If(), "must be If");
if_proj = if_proj->in(0)->in(0);
}
assert(if_proj->is_Proj() && if_proj->in(0)->is_Initialize(),
"must have reached control projection of init node");
}
void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call) {
const int trap_request = uncommon_trap_call->uncommon_trap_request();
assert(trap_request != 0, "no valid UCT trap request");
PreserveJVMState pjvms(this);
set_control(uncommon_trap_call->in(0));
uncommon_trap(Deoptimization::trap_request_reason(trap_request),
Deoptimization::trap_request_action(trap_request));
assert(stopped(), "Should be stopped");
_gvn.hash_delete(uncommon_trap_call);
uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
}
//------------------------------inline_arraycopy-----------------------
// public static native void java.lang.System.arraycopy(Object src, int srcPos,
@@ -5063,12 +5124,12 @@ bool LibraryCallKit::inline_arraycopy() {
AllocateArrayNode* alloc = tightly_coupled_allocation(dest);
int saved_reexecute_sp = -1;
JVMState* saved_jvms = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp);
// See arraycopy_restore_alloc_state() comment
// if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards
// if saved_jvms != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
// if saved_jvms == NULL and alloc != NULL, we can't emit any guards
bool can_emit_guards = (alloc == NULL || saved_jvms != NULL);
// if saved_jvms_before_guards != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation
// if saved_jvms_before_guards == NULL and alloc != NULL, we can't emit any guards
bool can_emit_guards = (alloc == NULL || saved_jvms_before_guards != NULL);
// The following tests must be performed
// (1) src and dest are arrays.
@@ -5084,12 +5145,12 @@ bool LibraryCallKit::inline_arraycopy() {
// (3) src and dest must not be null.
// always do this here because we need the JVM state for uncommon traps
Node* null_ctl = top();
src = saved_jvms != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
src = saved_jvms_before_guards != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY);
assert(null_ctl->is_top(), "no null control here");
dest = null_check(dest, T_ARRAY);
if (!can_emit_guards) {
// if saved_jvms == NULL and alloc != NULL, we don't emit any
// if saved_jvms_before_guards == NULL and alloc != NULL, we don't emit any
// guards but the arraycopy node could still take advantage of a
// tightly allocated allocation. tightly_coupled_allocation() is
// called again to make sure it takes the null check above into
@@ -5203,7 +5264,7 @@ bool LibraryCallKit::inline_arraycopy() {
ciMethod* trap_method = method();
int trap_bci = bci();
if (saved_jvms != NULL) {
if (saved_jvms_before_guards != NULL) {
trap_method = alloc->jvms()->method();
trap_bci = alloc->jvms()->bci();
}
@@ -5275,10 +5336,9 @@ bool LibraryCallKit::inline_arraycopy() {
const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
const Type *toop = dest_klass_t->cast_to_exactness(false)->as_instance_type();
src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
arraycopy_move_allocation_here(alloc, dest, saved_jvms_before_guards, saved_reexecute_sp, new_idx);
}
arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
if (stopped()) {
return true;
}
@@ -5343,28 +5403,15 @@ LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
// There may be guards which feed into the slow_region.
// Any other control flow means that we might not get a chance
// to finish initializing the allocated object.
if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
IfNode* iff = ctl->in(0)->as_If();
Node* not_ctl = iff->proj_out_or_null(1 - ctl->as_Proj()->_con);
assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
// One more try: Various low-level checks bottom out in
// uncommon traps. If the debug-info of the trap omits
// any reference to the allocation, as we've already
// observed, then there can be no objection to the trap.
bool found_trap = false;
for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
Node* obs = not_ctl->fast_out(j);
if (obs->in(0) == not_ctl && obs->is_Call() &&
(obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
found_trap = true; break;
}
}
if (found_trap) {
ctl = iff->in(0); // This test feeds a harmless uncommon trap.
continue;
}
// Various low-level checks bottom out in uncommon traps. These
// are considered safe since we've already checked above that
// there is no unexpected observer of this allocation.
if (get_uncommon_trap_from_success_proj(ctl) != nullptr) {
assert(ctl->in(0)->is_If(), "must be If");
ctl = ctl->in(0)->in(0);
} else {
return nullptr;
}
return NULL;
}
// If we get this far, we have an allocation which immediately
@@ -5375,6 +5422,20 @@ LibraryCallKit::tightly_coupled_allocation(Node* ptr) {
return alloc;
}
CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) {
if (node->is_IfProj()) {
Node* other_proj = node->as_IfProj()->other_if_proj();
for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) {
Node* obs = other_proj->fast_out(j);
if (obs->in(0) == other_proj && obs->is_CallStaticJava() &&
(obs->as_CallStaticJava()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
return obs->as_CallStaticJava();
}
}
}
return nullptr;
}
//-------------inline_encodeISOArray-----------------------------------
// encode char[] to byte[] in ISO_8859_1 or ASCII
bool LibraryCallKit::inline_encodeISOArray(bool ascii) {

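The allocation is only moved below the guards when there is no interfering store between the allocation and the copy (the no_interfering_store check in arraycopy_restore_alloc_state() above). A contrasting sketch, adapted from testNegativeSizeStore() in the new test: the field store keeps the allocation in place, so the unrelated-trap handling is not needed and the correct exception is thrown regardless.

    // Adapted from testNegativeSizeStore() in the new test; byArr is passed as null.
    static int iFld;

    static byte[] testNegativeSizeStore(byte[] byArr) {
        byte[] b = new byte[-1];               // throws NegativeArraySizeException
        iFld++;                                // interfering store: the allocation is not moved down
        int len = byArr.length;
        System.arraycopy(byArr, 0, b, 0, len);
        return b;
    }
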
@@ -266,8 +266,13 @@ class LibraryCallKit : public GraphKit {
// Helper functions for inlining arraycopy
bool inline_arraycopy();
AllocateArrayNode* tightly_coupled_allocation(Node* ptr);
static CallStaticJavaNode* get_uncommon_trap_from_success_proj(Node* node);
SafePointNode* create_safepoint_with_state_before_array_allocation(const AllocateArrayNode* alloc) const;
void replace_unrelated_uncommon_traps_with_alloc_state(AllocateArrayNode* alloc, JVMState* saved_jvms_before_guards);
void replace_unrelated_uncommon_traps_with_alloc_state(JVMState* saved_jvms_before_guards);
void create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_call);
JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms, int saved_reexecute_sp,
void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards, int saved_reexecute_sp,
uint new_idx);
typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;

@@ -0,0 +1,312 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8297730
* @summary Test that taking an uncommon trap between an array allocation and an array copy reports the correct exception.
* @library /test/lib
* @run main/othervm -Xcomp -XX:-TieredCompilation
* -XX:CompileCommand=compileonly,compiler.arraycopy.TestArrayCopyIntrinsicWithUCT::test*
* compiler.arraycopy.TestArrayCopyIntrinsicWithUCT
*/
package compiler.arraycopy;
import jdk.test.lib.Asserts;
import java.util.function.Function;
import java.util.function.Supplier;
public class TestArrayCopyIntrinsicWithUCT {
static int zero = 0;
static int zero2 = 0;
static int minusOne = -1;
static int iFld;
static int iFld2;
static boolean flag;
static byte[] byArrNull = null;
static A aFld = null;
public static void main(String[] args) {
System.out.println("Start"); // Ensure loaded.
new A(); // Ensure loaded
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSize);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSize2);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeFldSize);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeFldSize2);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeStore);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeStore2);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero2);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero3);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero4);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero5);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero6);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZero7);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld2);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld3);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld4);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld5);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld6);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroFld7);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeDivZeroNullPointer);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeSizeComplex);
runNegativeSize(TestArrayCopyIntrinsicWithUCT::testNegativeControlFlowNotAllowed);
flag = false;
runNegativeSizeHalf();
runNegativeSizeHalf();
}
static void runNegativeSize(Supplier<byte[]> testMethod) {
try {
testMethod.get();
Asserts.fail("should throw exception");
} catch (NegativeArraySizeException e) {
// Expected
}
}
static void runNegativeSize(Function<byte[], byte[]> testMethod) {
try {
testMethod.apply(null);
Asserts.fail("should throw exception");
} catch (NegativeArraySizeException e) {
// Expected
}
}
static byte[] testNegativeSize(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = byArr.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSize2() {
byte[] byArr = new byte[8];
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = byArrNull.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeFldSize(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = byArr.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeFldSize2() {
byte[] byArr = new byte[8];
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = byArrNull.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeStore(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
iFld++; // Since we have a store here, we do not move the allocation down
int len = byArr.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeStore2() {
byte[] byArr = new byte[8];
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
iFld++; // Since we have a store here, we do not move the allocation down
int len = byArrNull.length; // null check trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeDivZero(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = 8 / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeDivZero2() {
byte[] byArr = new byte[8];
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = 8 / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeDivZero3(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = 8 / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZero4(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = 8 / zero / zero2; // 2 div by zero traps would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZero5(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = 8 / zero / zero2; // 2 div by zero traps would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZero6(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = 8 / zero / zero2; // 2 div by zero traps would fail
System.arraycopy(byArr, 0, b, 0, 8);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZero7(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = 8 / zero / zero2; // 2 div by zero traps would fail
System.arraycopy(byArr, 0, b, 0, 8);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroFld(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = minusOne / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeDivZeroFld2() {
byte[] byArr = new byte[8];
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = minusOne / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
static byte[] testNegativeSizeDivZeroFld3(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = minusOne / zero; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroFld4(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = minusOne / zero / zero2; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroFld5(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = minusOne / zero / zero2; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, iFld2);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroFld6(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int len = minusOne / zero / zero2; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, 8);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroFld7(byte[] byArr) {
byte[] b = new byte[-1]; // throws NegativeArraySizeException
int len = minusOne / zero / zero2; // div by zero trap would fail
System.arraycopy(byArr, 0, b, 0, 8);
iFld = len;
return b;
}
static byte[] testNegativeSizeDivZeroNullPointer(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int x = minusOne / zero / zero2; // div by zero trap would fail
int len = byArr.length;
System.arraycopy(byArr, 0, b, 0, len);
iFld = x;
return b;
}
static byte[] testNegativeSizeComplex(byte[] byArr) {
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
int x = minusOne / zero; // div by zero trap would fail
int y = aFld.i;
int len = byArr.length;
x = x + aFld.i2 / zero2;
System.arraycopy(byArr, 0, b, 0, x);
iFld = x + y;
return b;
}
// Optimization not applied because of additional control flow that is not considered safe.
static byte[] testNegativeControlFlowNotAllowed(byte[] byArr) {
int x = 23;
byte[] b = new byte[minusOne]; // throws NegativeArraySizeException
if (flag) {
x = 34;
}
int len = x / zero;
System.arraycopy(byArr, 0, b, 0, 8);
iFld = len;
return b;
}
static void runNegativeSizeHalf() {
try {
testNegativeSizeHalf(null);
Asserts.fail("should throw exception");
} catch (NegativeArraySizeException e) {
Asserts.assertTrue(flag, "wrongly caught NegativeArraySizeException");
} catch (NullPointerException e) {
Asserts.assertFalse(flag, "wrongly caught NullPointerException");
}
flag = !flag;
}
static byte[] testNegativeSizeHalf(byte[] byArr) {
int size = flag ? -1 : 1;
byte[] b = new byte[size]; // throws NegativeArraySizeException if size == -1
int len = byArr.length; // throws NullPointerException if size == 1
System.arraycopy(byArr, 0, b, 0, len);
return b;
}
}
class A {
int i, i2;
}