8191988: JDK-8190862 work for arch s390

Cleanup interpreter TLAB code and fix ZeroTLAB

Reviewed-by: mdoerr, goetz
Jean Christophe Beyler 2017-12-20 08:38:20 -08:00
parent 63d41b2e6a
commit 2a8c7a915a
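For orientation: the fast path emitted by this template is a plain TLAB bump-pointer allocation, and ZeroTLAB only decides whether the freshly allocated words still have to be cleared per object. If the TLAB was already zeroed when it was handed to the thread, the per-object field initialization can be skipped, which is what the new "if (!ZeroTLAB)" guard in the diff below expresses. A minimal C++ sketch of that logic, with made-up names (Tlab, allocate_in_tlab, ZERO_TLAB, HEADER_SIZE) standing in for the real HotSpot code:

    #include <cstddef>
    #include <cstring>

    // Hypothetical stand-in for a thread-local allocation buffer (TLAB).
    struct Tlab {
      char* top;   // next free byte
      char* end;   // end of the buffer
    };

    constexpr bool   ZERO_TLAB   = false;  // stand-in for -XX:+/-ZeroTLAB
    constexpr size_t HEADER_SIZE = 16;     // stand-in for sizeof(oopDesc)

    // Bump-pointer fast path; returning nullptr means "take the slow case",
    // like the z_brh(slow_case) branch in the template.
    char* allocate_in_tlab(Tlab& tlab, size_t size) {
      char* old_top = tlab.top;          // z_lg   tlab_top
      char* new_top = old_top + size;    // load_address old_top + Rsize
      if (new_top > tlab.end) {          // z_cg   against tlab_end
        return nullptr;                  // z_brh  slow_case
      }
      tlab.top = new_top;                // z_stg  new tlab_top

      if (!ZERO_TLAB) {
        // TLAB memory is not pre-zeroed, so clear the field area now
        // (done with MVCLE in the template); the header is written separately.
        std::memset(old_top + HEADER_SIZE, 0, size - HEADER_SIZE);
      }
      // ... initialize_header: store mark word and klass ...
      return old_top;
    }

The cleanup folds the previously separate if (UseTLAB) blocks into one and drops the dead allow_shared_alloc path, so the ZeroTLAB case no longer needs the initialize_object label at all.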


@@ -3722,7 +3722,6 @@ void TemplateTable::_new() {
   Label slow_case;
   Label done;
   Label initialize_header;
-  Label initialize_object; // Including clearing the fields.
   Label allocate_shared;
 
   BLOCK_COMMENT("TemplateTable::_new {");
@@ -3760,65 +3759,41 @@ void TemplateTable::_new() {
   // Allocate the instance
   // 1) Try to allocate in the TLAB.
-  // 2) If fail and the object is large allocate in the shared Eden.
-  // 3) If the above fails (or is not applicable), go to a slow case
+  // 2) If the above fails (or is not applicable), go to a slow case
   // (creates a new TLAB, etc.).
-  // Always go the slow path. See comment above this template.
-  const bool allow_shared_alloc = false;
+  // Note: compared to other architectures, s390's implementation always goes
+  // to the slow path if TLAB is used and fails.
   if (UseTLAB) {
     Register RoldTopValue = RallocatedObject;
     Register RnewTopValue = tmp;
     __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
     __ load_address(RnewTopValue, Address(RoldTopValue, Rsize));
     __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset()));
-    __ z_brh(allow_shared_alloc ? allocate_shared : slow_case);
+    __ z_brh(slow_case);
     __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset()));
-    if (ZeroTLAB) {
-      // The fields have been already cleared.
-      __ z_bru(initialize_header);
-    } else {
-      // Initialize both the header and fields.
-      if (allow_shared_alloc) {
-        __ z_bru(initialize_object);
-      } else {
-        // Fallthrough to initialize_object, but assert that it is on fall through path.
-        prev_instr_address = __ pc();
-      }
-    }
-  }
-  if (allow_shared_alloc) {
-    // Allocation in shared Eden not implemented, because sapjvm allocation trace does not allow it.
-    Unimplemented();
-  }
-  if (UseTLAB) {
     Register RobjectFields = tmp;
     Register Rzero = Z_R1_scratch;
-    assert(ZeroTLAB || prev_instr_address == __ pc(),
-           "must not omit jump to initialize_object above, as it is not on the fall through path");
     __ clear_reg(Rzero, true /*whole reg*/, false); // Load 0L into Rzero. Don't set CC.
-    // The object is initialized before the header. If the object size is
-    // zero, go directly to the header initialization.
-    __ bind(initialize_object);
-    __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC.
-    __ z_bre(initialize_header); // Jump if size of fields is zero.
+    if (!ZeroTLAB) {
+      // The object is initialized before the header. If the object size is
+      // zero, go directly to the header initialization.
+      __ z_aghi(Rsize, (int)-sizeof(oopDesc)); // Subtract header size, set CC.
+      __ z_bre(initialize_header); // Jump if size of fields is zero.
-    // Initialize object fields.
-    // See documentation for MVCLE instruction!!!
-    assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
-    assert(Rsize->encoding() == (RobjectFields->encoding()+1),
-           "RobjectFields and Rsize must be a register pair");
-    assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");
+      // Initialize object fields.
+      // See documentation for MVCLE instruction!!!
+      assert(RobjectFields->encoding() % 2 == 0, "RobjectFields must be an even register");
+      assert(Rsize->encoding() == (RobjectFields->encoding()+1),
+             "RobjectFields and Rsize must be a register pair");
+      assert(Rzero->encoding() % 2 == 1, "Rzero must be an odd register");
-    // Set Rzero to 0 and use it as src length, then mvcle will copy nothing
-    // and fill the object with the padding value 0.
-    __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
-    __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);
+      // Set Rzero to 0 and use it as src length, then mvcle will copy nothing
+      // and fill the object with the padding value 0.
+      __ add2reg(RobjectFields, sizeof(oopDesc), RallocatedObject);
+      __ move_long_ext(RobjectFields, as_Register(Rzero->encoding() - 1), 0);
+    }
     // Initialize object header only.
     __ bind(initialize_header);
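The field clearing that stays inside the new if (!ZeroTLAB) block leans on a property of the z/Architecture MVCLE instruction: each operand is an even/odd register pair (address in the even register, length in the odd one), and when the source length is zero nothing is copied and the remaining destination bytes are filled with the padding byte, here 0. That is why the asserts insist on the register pairing and why Rzero doubles as source length and pad value. In effect the emitted sequence is a memset over the field area; a rough C++ model, with hypothetical names, is:

    #include <cstddef>
    #include <cstring>

    // Rough model of what MVCLE achieves here: with a zero-length source the
    // destination is filled with the padding byte, i.e. a hardware memset.
    void mvcle_zero_source(void* dst, size_t dst_len, unsigned char pad) {
      std::memset(dst, pad, dst_len);  // copy nothing, pad the rest
    }

    // The template's register setup, expressed as plain values:
    //   RobjectFields = RallocatedObject + sizeof(oopDesc)   (start of fields)
    //   Rsize         = object size - sizeof(oopDesc)        (length of fields)
    //   Rzero         = 0                                    (source length and pad)
    void clear_fields(char* allocated_object, size_t object_size, size_t header_size) {
      mvcle_zero_source(allocated_object + header_size, object_size - header_size, /*pad=*/0);
    }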