diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index f3179cf6479..80c00c713d7 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -2450,7 +2450,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
 
   if (method->is_synchronized()) {
-    ConditionRegister r_flag = CCR1;
     Register r_oop = r_temp_4;
     const Register r_box = r_temp_5;
     Label done, locked;
@@ -2465,8 +2464,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
 
     // Try fastpath for locking.
     // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
-    __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
-    __ beq(r_flag, locked);
+    __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+    __ beq(CCR0, locked);
 
     // None of the above fast optimizations worked so we have to get into the
     // slow case of monitor enter. Inline a special case of call_VM that
@@ -2659,8 +2658,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
 
   if (method->is_synchronized()) {
-
-    ConditionRegister r_flag = CCR1;
     const Register r_oop = r_temp_4;
     const Register r_box = r_temp_5;
     const Register r_exception = r_temp_6;
@@ -2677,8 +2674,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
     __ addi(r_box, R1_SP, lock_offset);
 
     // Try fastpath for unlocking.
-    __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
-    __ beq(r_flag, done);
+    __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+    __ beq(CCR0, done);
 
     // Save and restore any potential method result value around the unlocking operation.
     save_native_result(masm, ret_type, workspace_slot_offset);
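
Note (illustrative, not part of the patch): compiler_fast_lock_object() / compiler_fast_unlock_object() report fast-path success in the condition register they are handed, and the wrapper then branches on that same register with beq, as the hunks above show. The change simply drops the local r_flag alias of CCR1 and passes CCR0 directly. Condensed from the hunks, the post-patch locking sequence reads roughly as follows (surrounding code elided, not a verbatim excerpt of generate_native_wrapper()):

    // Sketch only, condensed from the hunks above.
    if (method->is_synchronized()) {
      // fast_lock kills r_temp_1, r_temp_2, r_temp_3 and leaves its result
      // in the condition register passed as the first argument (here CCR0);
      // EQ means the fast path took the lock.
      __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
      __ beq(CCR0, locked);   // fast path succeeded
      // ... otherwise fall through to the slow-path monitor enter.
    }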