Mikael Gerdin 2016-05-04 10:06:00 +02:00
commit 512ffd9ded
68 changed files with 619 additions and 561 deletions

View File

@ -825,17 +825,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ function_entry();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
return start;
}
#if !defined(PRODUCT)
// Wrapper which calls oopDesc::is_oop_or_null()
// Only called by MacroAssembler::verify_oop
@ -3111,8 +3100,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,20 +64,6 @@ static const Register& Lstub_temp = L2;
// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions
static address handle_unsafe_access() {
JavaThread* thread = JavaThread::current();
address pc = thread->saved_exception_pc();
address npc = thread->saved_exception_npc();
// pc is the instruction which we must emulate
// doing a no-op is fine: return garbage from the load
// request an async exception
thread->set_pending_unsafe_access_error();
// return address of next instruction to execute
return npc;
}
class StubGenerator: public StubCodeGenerator {
private:
@ -746,62 +732,6 @@ class StubGenerator: public StubCodeGenerator {
Label _atomic_add_stub; // called from other stubs
//------------------------------------------------------------------------------------------------------------------------
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
//
// Arguments :
//
// trapping PC: O7
//
// Results:
// posts an asynchronous exception, skips the trapping instruction
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ pc();
const int preserve_register_words = (64 * 2);
Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
Register Lthread = L7_thread_cache;
int i;
__ save_frame(0);
__ mov(G1, L1);
__ mov(G2, L2);
__ mov(G3, L3);
__ mov(G4, L4);
__ mov(G5, L5);
for (i = 0; i < 64; i += 2) {
__ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
}
address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
BLOCK_COMMENT("call handle_unsafe_access");
__ call(entry_point, relocInfo::runtime_call_type);
__ delayed()->nop();
__ mov(L1, G1);
__ mov(L2, G2);
__ mov(L3, G3);
__ mov(L4, G4);
__ mov(L5, G5);
for (i = 0; i < 64; i += 2) {
__ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
}
__ verify_thread();
__ jmp(O0, 0);
__ delayed()->restore();
return start;
}
// Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
// Arguments :
//
@ -5380,9 +5310,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
StubRoutines::_handler_for_unsafe_access_entry =
generate_handler_for_unsafe_access();
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

View File

@ -67,7 +67,7 @@ void LinearScan::allocate_fpu_stack() {
// register information would be incorrect.
if (b->number_of_preds() > 1) {
int id = b->first_lir_instruction_id();
BitMap regs(FrameMap::nof_fpu_regs);
ResourceBitMap regs(FrameMap::nof_fpu_regs);
regs.clear();
iw.walk_to(id); // walk after the first instruction (always a label) of the block
@ -1069,7 +1069,7 @@ bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
// clean up stack first so that there are no dead values on the stack
if (ComputeExactFPURegisterUsage) {
FpuStackSim* cur_sim = sim();
BitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
ResourceBitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
assert(live_fpu_regs.size() == FrameMap::nof_fpu_regs, "missing register usage");
merge_cleanup_fpu_stack(instrs, cur_sim, live_fpu_regs);

View File

@ -380,7 +380,7 @@ ALIGNED_(8) juint StubRoutines::x86::_P_1[] =
ALIGNED_(8) juint StubRoutines::x86::_NEG_ZERO[] =
{
0x00000000UL, 0x3c800000UL
0x00000000UL, 0x80000000UL
};
void MacroAssembler::fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ebx, Register ecx, Register edx, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
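This one-word table change is a genuine bug fix, not part of the surrounding renames: read as the low and high words of a little-endian IEEE-754 double, {0x00000000, 0x3c800000} encodes 2^-55 (biased exponent field 0x3c8), whereas negative zero is the sign bit alone, {0x00000000, 0x80000000}. A small standalone check, independent of HotSpot:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // The two 32-bit words as laid out in the stub's constant table
  // (little-endian: low word first).
  uint32_t old_neg_zero[2] = { 0x00000000u, 0x3c800000u };  // before the fix
  uint32_t new_neg_zero[2] = { 0x00000000u, 0x80000000u };  // after the fix

  double before, after;
  memcpy(&before, old_neg_zero, sizeof before);
  memcpy(&after,  new_neg_zero, sizeof after);

  printf("%.17g\n", before);                // 2.7755575615628914e-17, i.e. 2^-55
  printf("%g signbit=%d\n", after, std::signbit(after));   // -0 signbit=1
  return 0;
}

Presumably the constant matters to the sign handling in fast_sin's argument reduction, where a true -0.0 and a tiny positive value behave differently.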

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,21 +63,6 @@ const int FPU_CNTRL_WRD_MASK = 0xFFFF;
// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions
static address handle_unsafe_access() {
JavaThread* thread = JavaThread::current();
address pc = thread->saved_exception_pc();
// pc is the instruction which we must emulate
// doing a no-op is fine: return garbage from the load
// therefore, compute npc
address npc = Assembler::locate_next_instruction(pc);
// request an async exception
thread->set_pending_unsafe_access_error();
// return address of next instruction to execute
return npc;
}
class StubGenerator: public StubCodeGenerator {
private:
@ -623,27 +608,6 @@ class StubGenerator: public StubCodeGenerator {
}
//---------------------------------------------------------------------------
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ pc();
__ push(0); // hole for return address-to-be
__ pusha(); // push registers
Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
BLOCK_COMMENT("call handle_unsafe_access");
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
__ movptr(next_pc, rax); // stuff next address
__ popa();
__ ret(0); // jump to next address
return start;
}
//----------------------------------------------------------------------------------------------------
// Non-destructive plausibility checks for oops
@ -3865,9 +3829,6 @@ class StubGenerator: public StubCodeGenerator {
// These are currently used by Solaris/Intel
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_handler_for_unsafe_access_entry =
generate_handler_for_unsafe_access();
// platform dependent
create_control_words();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,21 +61,6 @@ const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
// Stub Code definitions
static address handle_unsafe_access() {
JavaThread* thread = JavaThread::current();
address pc = thread->saved_exception_pc();
// pc is the instruction which we must emulate
// doing a no-op is fine: return garbage from the load
// therefore, compute npc
address npc = Assembler::locate_next_instruction(pc);
// request an async exception
thread->set_pending_unsafe_access_error();
// return address of next instruction to execute
return npc;
}
class StubGenerator: public StubCodeGenerator {
private:
@ -989,32 +974,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// The following routine generates a subroutine to throw an
// asynchronous UnknownError when an unsafe access gets a fault that
// could not be reasonably prevented by the programmer. (Example:
// SIGBUS/OBJERR.)
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ pc();
__ push(0); // hole for return address-to-be
__ pusha(); // push registers
Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
// FIXME: this probably needs alignment logic
__ subptr(rsp, frame::arg_reg_save_area_bytes);
BLOCK_COMMENT("call handle_unsafe_access");
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
__ addptr(rsp, frame::arg_reg_save_area_bytes);
__ movptr(next_pc, rax); // stuff next address
__ popa();
__ ret(0); // jump to next address
return start;
}
// Non-destructive plausibility checks for oops
//
// Arguments:
@ -5136,9 +5095,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
StubRoutines::_fence_entry = generate_orderaccess_fence();
StubRoutines::_handler_for_unsafe_access_entry =
generate_handler_for_unsafe_access();
// platform dependent
StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -261,10 +261,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub();
StubRoutines::_fence_entry = ShouldNotCallThisStub();
// amd64 does this here, sparc does it in generate_all()
StubRoutines::_handler_for_unsafe_access_entry =
ShouldNotCallThisStub();
}
void generate_all() {

View File

@ -392,11 +392,9 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
os::Aix::ucontext_set_pc(uc, pc + 4);
address next_pc = pc + 4;
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Aix::ucontext_set_pc(uc, next_pc);
return 1;
}
}
@ -415,11 +413,9 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
}
else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && thread->doing_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
os::Aix::ucontext_set_pc(uc, pc + 4);
address next_pc = pc + 4;
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Aix::ucontext_set_pc(uc, next_pc);
return 1;
}
}

View File

@ -584,7 +584,8 @@ JVM_handle_bsd_signal(int sig,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
else
@ -655,7 +656,8 @@ JVM_handle_bsd_signal(int sig,
} else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in

View File

@ -226,23 +226,6 @@ extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif
// An operation in Unsafe has faulted. We're going to return to the
// instruction after the faulting load or store. We also set
// pending_unsafe_access_error so that at some point in the future our
// user will get a helpful message.
static address handle_unsafe_access(JavaThread* thread, address pc) {
// pc is the instruction which we must emulate
// doing a no-op is fine: return garbage from the load
// therefore, compute npc
address npc = pc + NativeCall::instruction_size;
// request an async exception
thread->set_pending_unsafe_access_error();
// return address of next instruction to execute
return npc;
}
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
siginfo_t* info,
@ -387,7 +370,8 @@ JVM_handle_linux_signal(int sig,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = handle_unsafe_access(thread, pc);
address next_pc = pc + NativeCall::instruction_size;
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
else
@ -408,7 +392,8 @@ JVM_handle_linux_signal(int sig,
} else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
stub = handle_unsafe_access(thread, pc);
address next_pc = pc + NativeCall::instruction_size;
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
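Worth noting across the platform files in this commit: the fixed-width ports resume at pc + 4 (NativeCall::instruction_size is 4 bytes here), while the x86 ports must ask Assembler::locate_next_instruction(pc) for the resume address, since x86 instructions occupy anywhere from 1 to 15 bytes. A schematic contrast, with hypothetical helpers rather than HotSpot code:

#include <cstdint>

// Fixed-width ISA (PPC, SPARC, AArch64): the instruction after a faulting
// pc is always a constant 4 bytes away.
uintptr_t next_pc_fixed_width(uintptr_t pc) {
  return pc + 4;
}

// x86: lengths vary from 1 to 15 bytes, so finding the next pc requires a
// decoder; HotSpot delegates to Assembler::locate_next_instruction(pc).
// Declaration only, a stand-in for that decoder, not an implementation.
uint8_t* locate_next_instruction(uint8_t* pc);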

View File

@ -366,11 +366,9 @@ JVM_handle_linux_signal(int sig,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
os::Linux::ucontext_set_pc(uc, pc + 4);
address next_pc = pc + 4;
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Linux::ucontext_set_pc(uc, next_pc);
return true;
}
}
@ -385,10 +383,8 @@ JVM_handle_linux_signal(int sig,
}
else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && thread->doing_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning
// garbage from this read is ok.
thread->set_pending_unsafe_access_error();
address next_pc = pc + 4;
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Linux::ucontext_set_pc(uc, next_pc);
return true;
}

View File

@ -433,14 +433,14 @@ inline static bool checkPollingPage(address pc, address fault, address* stub) {
return false;
}
inline static bool checkByteBuffer(address pc, address* stub) {
inline static bool checkByteBuffer(address pc, address npc, JavaThread* thread, address* stub) {
// BugId 4454115: A read from a MappedByteBuffer can fault
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
*stub = StubRoutines::handler_for_unsafe_access();
*stub = SharedRuntime::handle_unsafe_access(thread, npc);
return true;
}
return false;
@ -613,7 +613,7 @@ JVM_handle_linux_signal(int sig,
if (sig == SIGBUS &&
thread->thread_state() == _thread_in_vm &&
thread->doing_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
if (thread->thread_state() == _thread_in_Java) {
@ -625,7 +625,7 @@ JVM_handle_linux_signal(int sig,
break;
}
if ((sig == SIGBUS) && checkByteBuffer(pc, &stub)) {
if ((sig == SIGBUS) && checkByteBuffer(pc, npc, thread, &stub)) {
break;
}
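The BugId 4454115 failure mode that checkByteBuffer guards against is easy to reproduce outside the VM: once the file backing an mmap'ed region is truncated, touching a page past the new end-of-file raises SIGBUS. A self-contained POSIX demo (not HotSpot code; error checking omitted for brevity):

#include <csetjmp>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;
static void on_sigbus(int) { siglongjmp(env, 1); }

int main() {
  char path[] = "/tmp/mapXXXXXX";
  int fd = mkstemp(path);
  (void)write(fd, "hello", 5);

  char* p = (char*)mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
  ftruncate(fd, 0);                  // the file shrinks under the mapping

  struct sigaction sa;
  memset(&sa, 0, sizeof sa);
  sa.sa_handler = on_sigbus;
  sigaction(SIGBUS, &sa, NULL);

  if (sigsetjmp(env, 1) == 0)
    printf("%d\n", p[0]);            // faults: page no longer backed by file
  else
    puts("caught SIGBUS after truncation");

  munmap(p, 4096); close(fd); unlink(path);
  return 0;
}

In the VM the same fault arrives in the signal handler above, which after this change answers it by resuming at npc with the pending error set, instead of bouncing through a stub.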

View File

@ -420,7 +420,8 @@ JVM_handle_linux_signal(int sig,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
else
@ -469,7 +470,8 @@ JVM_handle_linux_signal(int sig,
} else if (thread->thread_state() == _thread_in_vm &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in

View File

@ -441,7 +441,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
if (thread->thread_state() == _thread_in_vm) {
if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
}
@ -480,7 +480,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
stub = SharedRuntime::handle_unsafe_access(thread, npc);
}
}

View File

@ -503,7 +503,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
if (thread->thread_state() == _thread_in_vm) {
if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
@ -520,7 +521,8 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
if (cb != NULL) {
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
address next_pc = Assembler::locate_next_instruction(pc);
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
}

View File

@ -50,11 +50,11 @@ class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder
// fields used by mark_loops
BitMap _active; // for iteration of control flow graph
BitMap _visited; // for iteration of control flow graph
intArray _loop_map; // caches the information if a block is contained in a loop
int _next_loop_index; // next free loop number
int _next_block_number; // for reverse postorder numbering of blocks
ResourceBitMap _active; // for iteration of control flow graph
ResourceBitMap _visited; // for iteration of control flow graph
intArray _loop_map; // caches the information if a block is contained in a loop
int _next_loop_index; // next free loop number
int _next_block_number; // for reverse postorder numbering of blocks
// accessors
Compilation* compilation() const { return _compilation; }
@ -227,7 +227,7 @@ void BlockListBuilder::set_leaders() {
// Without it, backward branches could jump to a bci where no block was created
// during bytecode iteration. This would require the creation of a new block at the
// branch target and a modification of the successor lists.
BitMap bci_block_start = method()->bci_block_start();
const BitMap& bci_block_start = method()->bci_block_start();
ciBytecodeStream s(method());
while (s.next() != ciBytecodeStream::EOBC()) {
@ -355,8 +355,8 @@ void BlockListBuilder::set_leaders() {
void BlockListBuilder::mark_loops() {
ResourceMark rm;
_active = BitMap(BlockBegin::number_of_blocks()); _active.clear();
_visited = BitMap(BlockBegin::number_of_blocks()); _visited.clear();
_active.initialize(BlockBegin::number_of_blocks());
_visited.initialize(BlockBegin::number_of_blocks());
_loop_map = intArray(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), 0);
_next_loop_index = 0;
_next_block_number = _blocks.length();
@ -364,6 +364,10 @@ void BlockListBuilder::mark_loops() {
// recursively iterate the control flow graph
mark_loops(_bci2block->at(0), false);
assert(_next_block_number >= 0, "invalid block numbers");
// Remove dangling Resource pointers before the ResourceMark goes out-of-scope.
_active.resize(0);
_visited.resize(0);
}
void BlockListBuilder::make_loop_header(BlockBegin* block) {
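The two resize(0) calls are the subtle part of this hunk: _active and _visited are fields of a builder that outlives the ResourceMark, but their backing words now come from the resource area that the mark releases, so the pointers must be dropped first. A standalone model of the hazard, using a miniature arena rather than HotSpot's ResourceArea:

#include <cstddef>
#include <vector>

// Miniature stand-in for HotSpot's resource area: a region allocator whose
// "mark" releases everything allocated after it, like ResourceMark does.
struct Arena {
  std::vector<char*> chunks;
  char* alloc(size_t n) { chunks.push_back(new char[n]()); return chunks.back(); }
  size_t mark() const { return chunks.size(); }
  void release_to(size_t m) {
    while (chunks.size() > m) { delete[] chunks.back(); chunks.pop_back(); }
  }
};

// Stand-in for ResourceBitMap: its words live in the arena; it owns nothing.
struct BitMapModel {
  char* words = nullptr;
  size_t bits = 0;
  void initialize(Arena& a, size_t n) { words = a.alloc((n + 7) / 8); bits = n; }
  void resize0() { words = nullptr; bits = 0; }  // forget the arena pointer
};

struct Builder {
  BitMapModel active;             // long-lived field of the builder
  void mark_loops(Arena& a) {
    size_t rm = a.mark();         // ~ ResourceMark rm;
    active.initialize(a, 128);    // backing words come from the arena
    // ... iterate the control flow graph ...
    active.resize0();             // without this, 'words' dangles after release
    a.release_to(rm);             // ~ ResourceMark destructor frees the words
  }
};

int main() { Arena a; Builder b; b.mark_loops(a); return 0; }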
@ -3076,7 +3080,7 @@ void GraphBuilder::setup_osr_entry_block() {
Value local;
// find all the locals that the interpreter thinks contain live oops
const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci);
// compute the offset into the locals so that we can treat the buffer
// as if the locals were still in the interpreter frame

View File

@ -460,14 +460,14 @@ class ComputeLinearScanOrder : public StackObj {
BlockList* _linear_scan_order; // the resulting list of blocks in correct order
BitMap _visited_blocks; // used for recursive processing of blocks
BitMap _active_blocks; // used for recursive processing of blocks
BitMap _dominator_blocks; // temporary BitMap used for computation of dominator
intArray _forward_branches; // number of incoming forward branches for each block
BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order)
BlockList _loop_headers;
ResourceBitMap _visited_blocks; // used for recursive processing of blocks
ResourceBitMap _active_blocks; // used for recursive processing of blocks
ResourceBitMap _dominator_blocks; // temporary BitMap used for computation of dominator
intArray _forward_branches; // number of incoming forward branches for each block
BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order)
BlockList _loop_headers;
Compilation* _compilation;
@ -535,7 +535,7 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
_loop_end_blocks(8),
_work_list(8),
_linear_scan_order(NULL), // initialized later with correct size
_loop_map(0, 0), // initialized later with correct size
_loop_map(0), // initialized later with correct size
_compilation(c)
{
TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));

View File

@ -151,7 +151,7 @@ class IRScope: public CompilationResourceObj {
bool _wrote_volatile; // has written volatile field
BlockBegin* _start; // the start block, successors are method entries
BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
ResourceBitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
// helper functions
BlockBegin* build_graph(Compilation* compilation, int osr_bci);

View File

@ -787,7 +787,7 @@ bool BlockBegin::try_merge(ValueStack* new_state) {
TRACE_PHI(tty->print_cr("creating phi-function %c%d for stack %d", new_state->stack_at(index)->type()->tchar(), new_state->stack_at(index)->id(), index));
}
BitMap requires_phi_function = new_state->scope()->requires_phi_function();
BitMap& requires_phi_function = new_state->scope()->requires_phi_function();
for_each_local_value(new_state, index, new_value) {
bool requires_phi = requires_phi_function.at(index) || (new_value->type()->is_double_word() && requires_phi_function.at(index + 1));

View File

@ -1596,8 +1596,8 @@ LEAF(BlockBegin, StateSplit)
int _flags; // the flags associated with this block
// fields used by BlockListBuilder
int _total_preds; // number of predecessors found by BlockListBuilder
BitMap _stores_to_locals; // bit is set when a local variable is stored in the block
int _total_preds; // number of predecessors found by BlockListBuilder
ResourceBitMap _stores_to_locals; // bit is set when a local variable is stored in the block
// SSA specific fields: (factor out later)
BlockList _successors; // the successors of this block
@ -1614,15 +1614,15 @@ LEAF(BlockBegin, StateSplit)
Label _label; // the label associated with this block
LIR_List* _lir; // the low level intermediate representation for this block
BitMap _live_in; // set of live LIR_Opr registers at entry to this block
BitMap _live_out; // set of live LIR_Opr registers at exit from this block
BitMap _live_gen; // set of registers used before any redefinition in this block
BitMap _live_kill; // set of registers defined in this block
ResourceBitMap _live_in; // set of live LIR_Opr registers at entry to this block
ResourceBitMap _live_out; // set of live LIR_Opr registers at exit from this block
ResourceBitMap _live_gen; // set of registers used before any redefinition in this block
ResourceBitMap _live_kill; // set of registers defined in this block
BitMap _fpu_register_usage;
intArray* _fpu_stack_state; // For x86 FPU code generation with UseLinearScan
int _first_lir_instruction_id; // ID of first LIR instruction in this block
int _last_lir_instruction_id; // ID of last LIR instruction in this block
ResourceBitMap _fpu_register_usage;
intArray* _fpu_stack_state; // For x86 FPU code generation with UseLinearScan
int _first_lir_instruction_id; // ID of first LIR instruction in this block
int _last_lir_instruction_id; // ID of last LIR instruction in this block
void iterate_preorder (boolArray& mark, BlockClosure* closure);
void iterate_postorder(boolArray& mark, BlockClosure* closure);
@ -1693,11 +1693,11 @@ LEAF(BlockBegin, StateSplit)
Label* label() { return &_label; }
LIR_List* lir() const { return _lir; }
int exception_handler_pco() const { return _exception_handler_pco; }
BitMap& live_in() { return _live_in; }
BitMap& live_out() { return _live_out; }
BitMap& live_gen() { return _live_gen; }
BitMap& live_kill() { return _live_kill; }
BitMap& fpu_register_usage() { return _fpu_register_usage; }
ResourceBitMap& live_in() { return _live_in; }
ResourceBitMap& live_out() { return _live_out; }
ResourceBitMap& live_gen() { return _live_gen; }
ResourceBitMap& live_kill() { return _live_kill; }
ResourceBitMap& fpu_register_usage() { return _fpu_register_usage; }
intArray* fpu_stack_state() const { return _fpu_stack_state; }
int first_lir_instruction_id() const { return _first_lir_instruction_id; }
int last_lir_instruction_id() const { return _last_lir_instruction_id; }
@ -1718,16 +1718,16 @@ LEAF(BlockBegin, StateSplit)
void substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux);
void set_lir(LIR_List* lir) { _lir = lir; }
void set_exception_handler_pco(int pco) { _exception_handler_pco = pco; }
void set_live_in (BitMap map) { _live_in = map; }
void set_live_out (BitMap map) { _live_out = map; }
void set_live_gen (BitMap map) { _live_gen = map; }
void set_live_kill (BitMap map) { _live_kill = map; }
void set_fpu_register_usage(BitMap map) { _fpu_register_usage = map; }
void set_live_in (const ResourceBitMap& map) { _live_in = map; }
void set_live_out (const ResourceBitMap& map) { _live_out = map; }
void set_live_gen (const ResourceBitMap& map) { _live_gen = map; }
void set_live_kill(const ResourceBitMap& map) { _live_kill = map; }
void set_fpu_register_usage(const ResourceBitMap& map) { _fpu_register_usage = map; }
void set_fpu_stack_state(intArray* state) { _fpu_stack_state = state; }
void set_first_lir_instruction_id(int id) { _first_lir_instruction_id = id; }
void set_last_lir_instruction_id(int id) { _last_lir_instruction_id = id; }
void increment_total_preds(int n = 1) { _total_preds += n; }
void init_stores_to_locals(int locals_count) { _stores_to_locals = BitMap(locals_count); _stores_to_locals.clear(); }
void init_stores_to_locals(int locals_count) { _stores_to_locals.initialize(locals_count); }
// generic
virtual void state_values_do(ValueVisitor* f);

View File

@ -470,7 +470,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
: _compilation(compilation)
, _method(method)
, _virtual_register_number(LIR_OprDesc::vreg_base)
, _vreg_flags(NULL, 0, num_vreg_flags) {
, _vreg_flags(num_vreg_flags) {
init();
}

View File

@ -88,7 +88,7 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
, _has_info(0)
, _has_call(0)
, _scope_value_cache(0) // initialized later with correct length
, _interval_in_loop(0, 0) // initialized later with correct length
, _interval_in_loop(0) // initialized later with correct length
, _cached_blocks(*ir->linear_scan_order())
#ifdef X86
, _fpu_stack_allocator(NULL)
@ -524,8 +524,8 @@ void LinearScan::number_instructions() {
assert(idx == num_instructions, "must match");
assert(idx * 2 == op_id, "must match");
_has_call = BitMap(num_instructions); _has_call.clear();
_has_info = BitMap(num_instructions); _has_info.clear();
_has_call.initialize(num_instructions);
_has_info.initialize(num_instructions);
}
@ -568,8 +568,8 @@ void LinearScan::compute_local_live_sets() {
for (int i = 0; i < num_blocks; i++) {
BlockBegin* block = block_at(i);
BitMap live_gen(live_size); live_gen.clear();
BitMap live_kill(live_size); live_kill.clear();
ResourceBitMap live_gen(live_size); live_gen.clear();
ResourceBitMap live_kill(live_size); live_kill.clear();
if (block->is_set(BlockBegin::exception_entry_flag)) {
// Phi functions at the begin of an exception handler are
@ -715,8 +715,8 @@ void LinearScan::compute_local_live_sets() {
block->set_live_gen (live_gen);
block->set_live_kill(live_kill);
block->set_live_in (BitMap(live_size)); block->live_in().clear();
block->set_live_out (BitMap(live_size)); block->live_out().clear();
block->set_live_in (ResourceBitMap(live_size)); block->live_in().clear();
block->set_live_out (ResourceBitMap(live_size)); block->live_out().clear();
TRACE_LINEAR_SCAN(4, tty->print("live_gen B%d ", block->block_id()); print_bitmap(block->live_gen()));
TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
@ -741,7 +741,7 @@ void LinearScan::compute_global_live_sets() {
bool change_occurred;
bool change_occurred_in_block;
int iteration_count = 0;
BitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations
ResourceBitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations
// Perform a backward dataflow analysis to compute live_out and live_in for each block.
// The loop is executed until a fixpoint is reached (no changes in an iteration)
@ -775,7 +775,7 @@ void LinearScan::compute_global_live_sets() {
if (!block->live_out().is_same(live_out)) {
// A change occurred. Swap the old and new live out sets to avoid copying.
BitMap temp = block->live_out();
ResourceBitMap temp = block->live_out();
block->set_live_out(live_out);
live_out = temp;
@ -787,7 +787,7 @@ void LinearScan::compute_global_live_sets() {
if (iteration_count == 0 || change_occurred_in_block) {
// live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
// note: live_in has to be computed only in first iteration or if live_out has changed!
BitMap live_in = block->live_in();
ResourceBitMap live_in = block->live_in();
live_in.set_from(block->live_out());
live_in.set_difference(block->live_kill());
live_in.set_union(block->live_gen());
@ -826,7 +826,7 @@ void LinearScan::compute_global_live_sets() {
#endif
// check that the live_in set of the first block is empty
BitMap live_in_args(ir()->start()->live_in().size());
ResourceBitMap live_in_args(ir()->start()->live_in().size());
live_in_args.clear();
if (!ir()->start()->live_in().is_same(live_in_args)) {
#ifdef ASSERT
@ -1317,7 +1317,7 @@ void LinearScan::build_intervals() {
assert(block_to == instructions->at(instructions->length() - 1)->id(), "must be");
// Update intervals for registers live at the end of this block;
BitMap live = block->live_out();
ResourceBitMap live = block->live_out();
int size = (int)live.size();
for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
assert(live.at(number), "should not stop here otherwise");
@ -1717,7 +1717,7 @@ void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to
const int num_regs = num_virtual_regs();
const int size = live_set_size();
const BitMap live_at_edge = to_block->live_in();
const ResourceBitMap live_at_edge = to_block->live_in();
// visit all registers where the live_at_edge bit is set
for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
@ -1774,8 +1774,8 @@ void LinearScan::resolve_data_flow() {
int num_blocks = block_count();
MoveResolver move_resolver(this);
BitMap block_completed(num_blocks); block_completed.clear();
BitMap already_resolved(num_blocks); already_resolved.clear();
ResourceBitMap block_completed(num_blocks); block_completed.clear();
ResourceBitMap already_resolved(num_blocks); already_resolved.clear();
int i;
for (i = 0; i < num_blocks; i++) {
@ -3397,7 +3397,7 @@ void LinearScan::verify_constants() {
for (int i = 0; i < num_blocks; i++) {
BlockBegin* block = block_at(i);
BitMap live_at_edge = block->live_in();
ResourceBitMap live_at_edge = block->live_in();
// visit all registers where the live_at_edge bit is set
for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
@ -3749,7 +3749,7 @@ void MoveResolver::verify_before_resolve() {
}
BitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
ResourceBitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
used_regs.clear();
if (!_multiple_reads_allowed) {
for (i = 0; i < _mapping_from.length(); i++) {
@ -6317,7 +6317,7 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
#ifdef ASSERT
BitMap return_converted(BlockBegin::number_of_blocks());
ResourceBitMap return_converted(BlockBegin::number_of_blocks());
return_converted.clear();
#endif

View File

@ -140,8 +140,8 @@ class LinearScan : public CompilationResourceObj {
LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node
BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
BitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo
BitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers
ResourceBitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo
ResourceBitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers
BitMap2D _interval_in_loop; // bit set for each virtual register that is contained in each loop
// cached debug info to prevent multiple creation of same object

View File

@ -36,7 +36,7 @@
class ValueSet: public CompilationResourceObj {
private:
BitMap _map;
ResourceBitMap _map;
public:
ValueSet();

View File

@ -443,12 +443,12 @@ MethodLivenessResult ciMethod::liveness_at_bci(int bci) {
// gc'ing an interpreter frame we need to use its viewpoint during
// OSR when loading the locals.
BitMap ciMethod::live_local_oops_at_bci(int bci) {
ResourceBitMap ciMethod::live_local_oops_at_bci(int bci) {
VM_ENTRY_MARK;
InterpreterOopMap mask;
OopMapCache::compute_one_oop_map(get_Method(), bci, &mask);
int mask_size = max_locals();
BitMap result(mask_size);
ResourceBitMap result(mask_size);
result.clear();
int i;
for (i = 0; i < mask_size ; i++ ) {
@ -463,7 +463,7 @@ BitMap ciMethod::live_local_oops_at_bci(int bci) {
// ciMethod::bci_block_start
//
// Marks all bcis where a new basic block starts
const BitMap ciMethod::bci_block_start() {
const BitMap& ciMethod::bci_block_start() {
check_is_loaded();
if (_liveness == NULL) {
// Create the liveness analyzer.

View File

@ -36,7 +36,6 @@
class ciMethodBlocks;
class MethodLiveness;
class BitMap;
class Arena;
class BCEscapeAnalyzer;
class InlineTree;
@ -233,10 +232,10 @@ class ciMethod : public ciMetadata {
// used when gc'ing an interpreter frame we need to use its viewpoint
// during OSR when loading the locals.
BitMap live_local_oops_at_bci(int bci);
ResourceBitMap live_local_oops_at_bci(int bci);
#ifdef COMPILER1
const BitMap bci_block_start();
const BitMap& bci_block_start();
#endif
ciTypeFlow* get_flow_analysis();

View File

@ -3967,7 +3967,7 @@ void ClassFileParser::layout_fields(ConstantPool* cp,
next_nonstatic_padded_offset += ContendedPaddingWidth;
// collect all contended groups
BitMap bm(cp->size());
ResourceBitMap bm(cp->size());
for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
// skip already laid out fields
if (fs.is_offset_set()) continue;

View File

@ -131,13 +131,13 @@ elapsedTimer MethodLiveness::_time_total;
MethodLiveness::MethodLiveness(Arena* arena, ciMethod* method)
#ifdef COMPILER1
: _bci_block_start((uintptr_t*)arena->Amalloc((method->code_size() >> LogBitsPerByte) + 1), method->code_size())
: _bci_block_start(arena, method->code_size())
#endif
{
_arena = arena;
_method = method;
_bit_map_size_bits = method->max_locals();
_bit_map_size_words = (_bit_map_size_bits / sizeof(unsigned int)) + 1;
#ifdef COMPILER1
_bci_block_start.clear();
@ -475,7 +475,7 @@ MethodLivenessResult MethodLiveness::get_liveness_at(int entry_bci) {
bci = 0;
}
MethodLivenessResult answer((BitMap::bm_word_t*)NULL,0);
MethodLivenessResult answer;
if (_block_count > 0) {
if (TimeLivenessAnalysis) _time_total.start();
@ -574,16 +574,11 @@ void MethodLiveness::print_times() {
MethodLiveness::BasicBlock::BasicBlock(MethodLiveness *analyzer, int start, int limit) :
_gen((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
analyzer->bit_map_size_bits()),
_kill((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
analyzer->bit_map_size_bits()),
_entry((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
analyzer->bit_map_size_bits()),
_normal_exit((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
analyzer->bit_map_size_bits()),
_exception_exit((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
analyzer->bit_map_size_bits()),
_gen(analyzer->arena(), analyzer->bit_map_size_bits()),
_kill(analyzer->arena(), analyzer->bit_map_size_bits()),
_entry(analyzer->arena(), analyzer->bit_map_size_bits()),
_normal_exit(analyzer->arena(), analyzer->bit_map_size_bits()),
_exception_exit(analyzer->arena(), analyzer->bit_map_size_bits()),
_last_bci(-1) {
_analyzer = analyzer;
_start_bci = start;
@ -991,17 +986,16 @@ void MethodLiveness::BasicBlock::propagate(MethodLiveness *ml) {
}
}
bool MethodLiveness::BasicBlock::merge_normal(BitMap other) {
bool MethodLiveness::BasicBlock::merge_normal(const BitMap& other) {
return _normal_exit.set_union_with_result(other);
}
bool MethodLiveness::BasicBlock::merge_exception(BitMap other) {
bool MethodLiveness::BasicBlock::merge_exception(const BitMap& other) {
return _exception_exit.set_union_with_result(other);
}
MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* method, int bci) {
MethodLivenessResult answer(NEW_RESOURCE_ARRAY(BitMap::bm_word_t, _analyzer->bit_map_size_words()),
_analyzer->bit_map_size_bits());
MethodLivenessResult answer(_analyzer->bit_map_size_bits());
answer.set_is_valid();
#ifndef ASSERT
@ -1013,8 +1007,8 @@ MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* metho
#ifdef ASSERT
ResourceMark rm;
BitMap g(_gen.size()); g.set_from(_gen);
BitMap k(_kill.size()); k.set_from(_kill);
ResourceBitMap g(_gen.size()); g.set_from(_gen);
ResourceBitMap k(_kill.size()); k.set_from(_kill);
#endif
if (_last_bci != bci || trueInDebug) {
ciBytecodeStream bytes(method);

View File

@ -30,18 +30,18 @@
class ciMethod;
class MethodLivenessResult : public BitMap {
class MethodLivenessResult : public ResourceBitMap {
private:
bool _is_valid;
public:
MethodLivenessResult(BitMap::bm_word_t* map, idx_t size_in_bits)
: BitMap(map, size_in_bits)
MethodLivenessResult()
: ResourceBitMap()
, _is_valid(false)
{}
MethodLivenessResult(idx_t size_in_bits)
: BitMap(size_in_bits)
: ResourceBitMap(size_in_bits)
, _is_valid(false)
{}
@ -66,23 +66,23 @@ class MethodLiveness : public ResourceObj {
int _limit_bci;
// The liveness at the start of the block;
BitMap _entry;
ArenaBitMap _entry;
// The summarized liveness effects of our direct successors reached
// by normal control flow
BitMap _normal_exit;
ArenaBitMap _normal_exit;
// The summarized liveness effects of our direct successors reached
// by exceptional control flow
BitMap _exception_exit;
ArenaBitMap _exception_exit;
// These members hold the results of the last call to
// compute_gen_kill_range(). _gen is the set of locals
// used before they are defined in the range. _kill is the
// set of locals defined before they are used.
BitMap _gen;
BitMap _kill;
int _last_bci;
ArenaBitMap _gen;
ArenaBitMap _kill;
int _last_bci;
// A list of all blocks which could come directly before this one
// in normal (non-exceptional) control flow. We propagate liveness
@ -100,11 +100,11 @@ class MethodLiveness : public ResourceObj {
// Our successors call this method to merge liveness information into
// our _normal_exit member.
bool merge_normal(BitMap other);
bool merge_normal(const BitMap& other);
// Our successors call this method to merge liveness information into
// our _exception_exit member.
bool merge_exception(BitMap other);
bool merge_exception(const BitMap& other);
// This helper routine is used to help compute the gen/kill pair for
// the block. It is also used to answer queries.
@ -181,7 +181,6 @@ class MethodLiveness : public ResourceObj {
// The size of a BitMap.
int _bit_map_size_bits;
int _bit_map_size_words;
// A list of all BasicBlocks.
BasicBlock **_block_list;
@ -198,7 +197,7 @@ class MethodLiveness : public ResourceObj {
#ifdef COMPILER1
// bcis where blocks start are marked
BitMap _bci_block_start;
ArenaBitMap _bci_block_start;
#endif // COMPILER1
// -- Graph construction & Analysis
@ -218,7 +217,6 @@ class MethodLiveness : public ResourceObj {
// And accessors.
int bit_map_size_bits() const { return _bit_map_size_bits; }
int bit_map_size_words() const { return _bit_map_size_words; }
// Work list manipulation routines. Called internally by BasicBlock.
BasicBlock *work_list_get();
@ -270,7 +268,7 @@ class MethodLiveness : public ResourceObj {
MethodLivenessResult get_liveness_at(int bci);
#ifdef COMPILER1
const BitMap get_bci_block_start() const { return _bci_block_start; }
const BitMap& get_bci_block_start() const { return _bci_block_start; }
#endif // COMPILER1
static void print_times() PRODUCT_RETURN;
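The pattern running through the compiler and GC files in this commit is that BitMap itself no longer owns storage; each use site names a flavor that says where the words live: ResourceBitMap (resource area), ArenaBitMap (caller-supplied arena, as in MethodLiveness here), CHeapBitMap (C heap, freed on destruction), and BitMapView (a non-owning window over memory reserved elsewhere, used by the GC code further down). A compressed sketch of that shape, simplified and not HotSpot's actual classes:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

typedef uint64_t bm_word_t;

// Base class: bit operations only; it never allocates or frees its words.
class BitMap {
protected:
  bm_word_t* _map;
  size_t _size;  // in bits
  BitMap(bm_word_t* map, size_t size) : _map(map), _size(size) {}
public:
  void set_bit(size_t i) { _map[i / 64] |= bm_word_t(1) << (i % 64); }
  bool at(size_t i) const { return (_map[i / 64] >> (i % 64)) & 1; }
  size_t size() const { return _size; }
};

// Owning flavor backed by the C heap (CHeapBitMap in HotSpot).
class CHeapBitMap : public BitMap {
public:
  explicit CHeapBitMap(size_t bits)
    : BitMap((bm_word_t*)std::calloc((bits + 63) / 64, sizeof(bm_word_t)), bits) {}
  ~CHeapBitMap() { std::free(_map); }
};

// Non-owning flavor over memory someone else reserved (BitMapView).
class BitMapView : public BitMap {
public:
  BitMapView() : BitMap(nullptr, 0) {}
  BitMapView(bm_word_t* map, size_t bits) : BitMap(map, bits) {}
};

int main() {
  CHeapBitMap commit_map(1024);   // owns and frees its storage
  commit_map.set_bit(3);

  static bm_word_t reserved[16];  // e.g. part of a reserved VirtualSpace
  BitMapView bm(reserved, 1024);  // a window; lifetime managed elsewhere
  bm.set_bit(3);
  return commit_map.at(3) && bm.at(3) ? 0 : 1;
}

The payoff shows all through the diff: constructor and initialize calls replace the manual set_map/set_size/clear triples, and storage ownership becomes visible in the type.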

View File

@ -5666,10 +5666,9 @@ bool CMSBitMap::allocate(MemRegion mr) {
}
assert(_virtual_space.committed_size() == brs.size(),
"didn't reserve backing store for all of CMS bit map?");
_bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
_bmWordSize, "inconsistency in bit map sizing");
_bm.set_size(_bmWordSize >> _shifter);
_bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
// bm.clear(); // can we rely on getting zero'd memory? verify below
assert(isAllClear(),

View File

@ -83,13 +83,12 @@ class SerialOldTracer;
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
const int _shifter; // shifts to convert HeapWord to bit position
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
const int _shifter; // shifts to convert HeapWord to bit position
VirtualSpace _virtual_space; // underlying the bit map
BitMap _bm; // the bit map itself
public:
Mutex* const _lock; // mutex protecting _bm;
BitMapView _bm; // the bit map itself
Mutex* const _lock; // mutex protecting _bm;
public:
// constructor

View File

@ -33,7 +33,7 @@
\
nonstatic_field(CMSBitMap, _bmWordSize, size_t) \
nonstatic_field(CMSBitMap, _shifter, const int) \
nonstatic_field(CMSBitMap, _bm, BitMap) \
nonstatic_field(CMSBitMap, _bm, BitMapView) \
nonstatic_field(CMSBitMap, _virtual_space, VirtualSpace) \
nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \

View File

@ -95,8 +95,8 @@ size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
// information.
class G1CardLiveDataHelper VALUE_OBJ_CLASS_SPEC {
private:
BitMap _region_bm;
BitMap _card_bm;
BitMapView _region_bm;
BitMapView _card_bm;
// The card number of the bottom of the G1 heap.
// Used in biasing indices into accounting card bitmaps.
@ -393,11 +393,11 @@ void G1CardLiveData::finalize(WorkGang* workers, G1CMBitMap* mark_bitmap) {
}
class G1ClearCardLiveDataTask : public AbstractGangTask {
BitMap _bitmap;
size_t _num_chunks;
size_t _cur_chunk;
BitMapView _bitmap;
size_t _num_chunks;
size_t _cur_chunk;
public:
G1ClearCardLiveDataTask(BitMap bitmap, size_t num_tasks) :
G1ClearCardLiveDataTask(const BitMapView& bitmap, size_t num_tasks) :
AbstractGangTask("G1 Clear Card Live Data"),
_bitmap(bitmap),
_num_chunks(num_tasks),

View File

@ -65,15 +65,15 @@ private:
size_t _live_regions_size_in_bits;
// The bits in this bitmap contain for every card whether it contains
// at least part of at least one live object.
BitMap live_cards_bm() const { return BitMap(_live_cards, _live_cards_size_in_bits); }
BitMapView live_cards_bm() const { return BitMapView(_live_cards, _live_cards_size_in_bits); }
// The bits in this bitmap indicate that a given region contains some live objects.
BitMap live_regions_bm() const { return BitMap(_live_regions, _live_regions_size_in_bits); }
BitMapView live_regions_bm() const { return BitMapView(_live_regions, _live_regions_size_in_bits); }
// Allocate a "large" bitmap from virtual memory with the given size in bits.
bm_word_t* allocate_large_bitmap(size_t size_in_bits);
void free_large_bitmap(bm_word_t* map, size_t size_in_bits);
inline BitMap live_card_bitmap(uint region);
inline BitMapView live_card_bitmap(uint region);
inline bool is_card_live_at(BitMap::idx_t idx) const;

View File

@ -29,8 +29,8 @@
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
inline BitMap G1CardLiveData::live_card_bitmap(uint region) {
return BitMap(_live_cards + ((size_t)region * _cards_per_region >> LogBitsPerWord), _cards_per_region);
inline BitMapView G1CardLiveData::live_card_bitmap(uint region) {
return BitMapView(_live_cards + ((size_t)region * _cards_per_region >> LogBitsPerWord), _cards_per_region);
}
inline bool G1CardLiveData::is_card_live_at(BitMap::idx_t idx) const {

View File

@ -110,8 +110,7 @@ void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
_bmStartWord = heap.start();
_bmWordSize = heap.word_size();
_bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
_bm.set_size(_bmWordSize >> _shifter);
_bm = BitMapView((BitMap::bm_word_t*) storage->reserved().start(), _bmWordSize >> _shifter);
storage->set_mapping_changed_listener(&_listener);
}

View File

@ -59,10 +59,10 @@ class G1CMIsAliveClosure: public BoolObjectClosure {
class G1CMBitMapRO VALUE_OBJ_CLASS_SPEC {
protected:
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
const int _shifter; // map to char or bit
BitMap _bm; // the bit map itself
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
const int _shifter; // map to char or bit
BitMapView _bm; // the bit map itself
public:
// constructor

View File

@ -75,19 +75,15 @@ void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t
vmassert(_committed.size() == 0, "virtual space initialized more than once");
BitMap::idx_t size_in_pages = rs.size() / page_size;
_committed.resize(size_in_pages, /* in_resource_area */ false);
_committed.initialize(size_in_pages);
if (_special) {
_dirty.resize(size_in_pages, /* in_resource_area */ false);
_dirty.initialize(size_in_pages);
}
_tail_size = used_size % _page_size;
}
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
release();
}
void G1PageBasedVirtualSpace::release() {
// This does not release memory it never reserved.
// Caller must release via rs.release();
_low_boundary = NULL;
@ -96,8 +92,6 @@ void G1PageBasedVirtualSpace::release() {
_executable = false;
_page_size = 0;
_tail_size = 0;
_committed.resize(0, false);
_dirty.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {

View File

@ -57,13 +57,13 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
size_t _page_size;
// Bitmap used for verification of commit/uncommit operations.
BitMap _committed;
CHeapBitMap _committed;
// Bitmap used to keep track of which pages are dirty or not for _special
// spaces. This is needed because for those spaces the underlying memory
// will only be zero filled the first time it is committed. Calls to commit
// will use this bitmap and return whether or not the memory is zero filled.
BitMap _dirty;
CHeapBitMap _dirty;
// Indicates that the entire space has been committed and pinned in memory,
// os::commit_memory() or os::uncommit_memory() have no function.
@ -139,8 +139,6 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
return x;
}
void release();
void check_for_contiguity() PRODUCT_RETURN;
// Debugging

View File

@ -34,11 +34,12 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
size_t used_size,
size_t page_size,
size_t region_granularity,
size_t commit_factor,
MemoryType type) :
_storage(rs, used_size, page_size),
_region_granularity(region_granularity),
_listener(NULL),
_commit_map() {
_commit_map(rs.size() * commit_factor / region_granularity) {
guarantee(is_power_of_2(page_size), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
@ -59,11 +60,10 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
_pages_per_region(alloc_granularity / (page_size * commit_factor)) {
guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uint start_idx, size_t num_regions) {
@ -103,12 +103,11 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
_regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
_refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uint start_idx, size_t num_regions) {

View File

@ -49,9 +49,9 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
size_t _region_granularity;
// Mapping management
BitMap _commit_map;
CHeapBitMap _commit_map;
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
public:
@ -62,9 +62,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
virtual ~G1RegionToSpaceMapper() {
_commit_map.resize(0, /* in_resource_area */ false);
}
virtual ~G1RegionToSpaceMapper() {}
bool is_committed(uintptr_t idx) const {
return _commit_map.at(idx);
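With commit_factor threaded into the base constructor, _commit_map is sized up front as rs.size() * commit_factor / region_granularity bits, one bit per allocation granule, which is exactly the quantity the two subclasses previously passed to resize. A worked example with hypothetical numbers (not taken from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t rs_size            = 1024u * 1024u * 1024u;  // 1 GB reserved
  const size_t commit_factor      = 1;
  const size_t region_granularity = 1024u * 1024u;          // 1 MB granules

  // Sizing rule from the G1RegionToSpaceMapper constructor above.
  const size_t commit_map_bits = rs_size * commit_factor / region_granularity;
  printf("%zu bits\n", commit_map_bits);                    // prints: 1024 bits
  return 0;
}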

View File

@ -51,8 +51,7 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
MemRegion reserved = heap_storage->reserved();
_regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
_available_map.resize(_regions.length(), false);
_available_map.clear();
_available_map.initialize(_regions.length());
}
bool HeapRegionManager::is_available(uint region) const {

View File

@ -83,7 +83,7 @@ class HeapRegionManager: public CHeapObj<mtGC> {
// Each bit in this bitmap indicates that the corresponding region is available
// for allocation.
BitMap _available_map;
CHeapBitMap _available_map;
// The number of regions committed in the heap.
uint _num_committed;

View File

@ -43,7 +43,7 @@ class PerRegionTable: public CHeapObj<mtGC> {
friend class HeapRegionRemSetIterator;
HeapRegion* _hr;
BitMap _bm;
CHeapBitMap _bm;
jint _occupied;
// next pointer for free/allocated 'all' list
@ -69,7 +69,7 @@ protected:
PerRegionTable(HeapRegion* hr) :
_hr(hr),
_occupied(0),
_bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
_bm(HeapRegion::CardsPerRegion),
_collision_list_next(NULL), _next(NULL), _prev(NULL)
{}
@ -259,8 +259,7 @@ size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
_g1h(G1CollectedHeap::heap()),
_hr(hr), _m(m),
_coarse_map(G1CollectedHeap::heap()->max_regions(),
false /* in-resource-area */),
_coarse_map(G1CollectedHeap::heap()->max_regions()),
_fine_grain_regions(NULL),
_first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
_n_fine_entries(0), _n_coarse_entries(0),

View File

@ -79,7 +79,7 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
HeapRegion* _hr;
// These are protected by "_m".
BitMap _coarse_map;
CHeapBitMap _coarse_map;
size_t _n_coarse_entries;
static jint _n_coarsenings;

View File

@ -59,10 +59,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)
_region_start = covered_region.start();
_region_size = covered_region.word_size();
BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
_beg_bits.set_map(map);
_beg_bits.set_size(bits / 2);
_end_bits.set_map(map + words / 2);
_end_bits.set_size(bits / 2);
_beg_bits = BitMapView(map, bits / 2);
_end_bits = BitMapView(map + words / 2, bits / 2);
return true;
}
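The new code carves a single reservation into two non-owning halves. A minimal standalone sketch of that pattern, with a hand-rolled View type standing in for BitMapView:

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t bm_word_t;

// Non-owning view in the spirit of BitMapView: it points into storage
// that somebody else allocates and frees.
struct View {
  bm_word_t* map;
  size_t     size_in_bits;
  View(bm_word_t* m, size_t bits) : map(m), size_in_bits(bits) {}
};

int main() {
  const size_t words = 16;                          // one backing reservation
  const size_t bits  = words * sizeof(bm_word_t) * 8;
  bm_word_t storage[16] = {0};

  // Split the reservation in half, as _beg_bits/_end_bits do above.
  View beg_bits(storage,             bits / 2);
  View end_bits(storage + words / 2, bits / 2);

  assert(beg_bits.map + words / 2 == end_bits.map); // views abut, no copy
  return 0;
}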

View File

@@ -182,8 +182,8 @@ private:
HeapWord* _region_start;
size_t _region_size;
BitMap _beg_bits;
BitMap _end_bits;
BitMapView _beg_bits;
BitMapView _end_bits;
PSVirtualSpace* _virtual_space;
size_t _reserved_byte_size;
};

View File

@@ -1929,7 +1929,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParCompactionManager* const cm =
ParCompactionManager::manager_array(int(i));
assert(cm->marking_stack()->is_empty(), "should be empty");
assert(cm->region_stack()->is_empty(), "should be empty");
assert(cm->region_stack()->is_empty(), "Region stack " SIZE_FORMAT " is not empty", i);
}
#endif // ASSERT
@@ -2370,10 +2370,8 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
// Once a thread has drained its stack, it should try to steal regions from
// other threads.
if (parallel_gc_threads > 1) {
for (uint j = 0; j < parallel_gc_threads; j++) {
q->enqueue(new StealRegionCompactionTask(terminator_ptr));
}
for (uint j = 0; j < parallel_gc_threads; j++) {
q->enqueue(new StealRegionCompactionTask(terminator_ptr));
}
}

View File

@@ -378,11 +378,10 @@ void CellTypeState::print(outputStream *os) {
// Basicblock handling methods
//
void GenerateOopMap ::initialize_bb() {
void GenerateOopMap::initialize_bb() {
_gc_points = 0;
_bb_count = 0;
_bb_hdr_bits.clear();
_bb_hdr_bits.resize(method()->code_size());
_bb_hdr_bits.reinitialize(method()->code_size());
}
void GenerateOopMap::bb_mark_fct(GenerateOopMap *c, int bci, int *data) {
@@ -1041,13 +1040,7 @@ void GenerateOopMap::update_basic_blocks(int bci, int delta,
assert(new_method_size >= method()->code_size() + delta,
"new method size is too small");
BitMap::bm_word_t* new_bb_hdr_bits =
NEW_RESOURCE_ARRAY(BitMap::bm_word_t,
BitMap::word_align_up(new_method_size));
_bb_hdr_bits.set_map(new_bb_hdr_bits);
_bb_hdr_bits.set_size(new_method_size);
_bb_hdr_bits.clear();
_bb_hdr_bits.reinitialize(new_method_size);
for(int k = 0; k < _bb_count; k++) {
if (_basic_blocks[k]._bci > bci) {

View File

@@ -350,7 +350,7 @@ class GenerateOopMap VALUE_OBJ_CLASS_SPEC {
BasicBlock * _basic_blocks; // Array of basicblock info
int _gc_points;
int _bb_count;
BitMap _bb_hdr_bits;
ResourceBitMap _bb_hdr_bits;
// Basicblocks methods
void initialize_bb ();

View File

@@ -168,7 +168,7 @@ class Parse : public GraphKit {
// Use init_node/init_graph to initialize Blocks.
// Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }
Block() : _live_locals() { ShouldNotReachHere(); }
public:

View File

@@ -261,7 +261,7 @@ void Parse::load_interpreter_state(Node* osr_buf) {
Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
// find all the locals that the interpreter thinks contain live oops
const BitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci());
for (index = 0; index < max_locals; index++) {
if (!live_locals.at(index)) {

View File

@@ -1762,6 +1762,21 @@ methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
return callee_method;
}
address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
// The faulting unsafe accesses should be changed to throw the error
// synchronously instead. Meanwhile the faulting instruction will be
// skipped over (effectively turning it into a no-op) and an
// asynchronous exception will be raised which the thread will
// handle at a later point. If the instruction is a load it will
// return garbage.
// Request an async exception.
thread->set_pending_unsafe_access_error();
// Return address of next instruction to execute.
return next_pc;
}
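A standalone sketch of that control flow, with a stand-in thread type; the signal-handler plumbing that saves the faulting pc and computes next_pc is assumed, not shown:

#include <cstdio>

typedef unsigned char* address;

// Stand-in for the one piece of JavaThread state this path touches.
struct FakeThread {
  bool pending_unsafe_access_error;
  void set_pending_unsafe_access_error() { pending_unsafe_access_error = true; }
};

// Same shape as SharedRuntime::handle_unsafe_access above: flag the thread
// so an asynchronous error is raised later, and return the address to
// resume at, skipping the faulting instruction.
address handle_unsafe_access(FakeThread* thread, address next_pc) {
  thread->set_pending_unsafe_access_error();
  return next_pc;
}

int main() {
  FakeThread t = { false };
  unsigned char code[8] = { 0 };
  address resume = handle_unsafe_access(&t, code + 4);
  printf("resume at +%d, pending=%d\n",
         (int)(resume - code), (int)t.pending_unsafe_access_error);
  return 0;
}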
#ifdef ASSERT
void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
const BasicType* sig_bt,

View File

@@ -522,6 +522,8 @@ class SharedRuntime: AllStatic {
static address handle_wrong_method_abstract(JavaThread* thread);
static address handle_wrong_method_ic_miss(JavaThread* thread);
static address handle_unsafe_access(JavaThread* thread, address next_pc);
#ifndef PRODUCT
// Collect and print inline cache miss statistics

View File

@@ -55,7 +55,6 @@ address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
address StubRoutines::_throw_StackOverflowError_entry = NULL;
address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
address StubRoutines::_handler_for_unsafe_access_entry = NULL;
jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = NULL;
address StubRoutines::_atomic_xchg_entry = NULL;

View File

@@ -111,7 +111,6 @@ class StubRoutines: AllStatic {
static address _throw_NullPointerException_at_call_entry;
static address _throw_StackOverflowError_entry;
static address _throw_delayed_StackOverflowError_entry;
static address _handler_for_unsafe_access_entry;
static address _atomic_xchg_entry;
static address _atomic_xchg_ptr_entry;
@@ -288,10 +287,6 @@ class StubRoutines: AllStatic {
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
// Exceptions during unsafe access - should throw Java exception rather
// than crash.
static address handler_for_unsafe_access() { return _handler_for_unsafe_access_entry; }
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_xchg_ptr_entry() { return _atomic_xchg_ptr_entry; }
static address atomic_store_entry() { return _atomic_store_entry; }

View File

@@ -1594,7 +1594,6 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
declare_type(TenuredGeneration, CardGeneration) \
declare_toplevel_type(GenCollectorPolicy) \
declare_toplevel_type(Space) \
declare_toplevel_type(BitMap) \
declare_type(CompactibleSpace, Space) \
declare_type(ContiguousSpace, CompactibleSpace) \
declare_type(OffsetTableContigSpace, ContiguousSpace) \
@@ -2238,6 +2237,9 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
declare_type(Array<Klass*>, MetaspaceObj) \
declare_type(Array<Method*>, MetaspaceObj) \
\
declare_toplevel_type(BitMap) \
declare_type(BitMapView, BitMap) \
\
declare_integer_type(AccessFlags) /* FIXME: wrong type (not integer) */\
declare_toplevel_type(address) /* FIXME: should this be an integer type? */\
declare_integer_type(BasicType) /* FIXME: wrong type (not integer) */\

View File

@@ -289,6 +289,7 @@ unsigned int Abstract_VM_Version::nof_parallel_worker_threads(
unsigned int switch_pt) {
if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
assert(ParallelGCThreads == 0, "Default ParallelGCThreads is not 0");
unsigned int threads;
// For very large machines, there are diminishing returns
// for large numbers of worker threads. Instead of
// hogging the whole system, use a fraction of the workers for every
@@ -296,9 +297,20 @@
// and a chosen fraction of 5/8
// use 8 + (72 - 8) * (5/8) == 48 worker threads.
unsigned int ncpus = (unsigned int) os::active_processor_count();
return (ncpus <= switch_pt) ?
ncpus :
(switch_pt + ((ncpus - switch_pt) * num) / den);
threads = (ncpus <= switch_pt) ?
ncpus :
(switch_pt + ((ncpus - switch_pt) * num) / den);
#ifndef _LP64
// On 32-bit binaries the virtual address space available to the JVM
// is usually limited to 2-3 GB (depends on the platform).
// Do not use up address space with too many threads (stacks and per-thread
// data). Note that x86 apps running on Win64 have 2 stacks per thread.
// GC may more generally scale down threads by max heap size (etc), but the
// consequences of over-provisioning threads are higher on 32-bit JVMs,
// so add hard limit here:
threads = MIN2(threads, (2*switch_pt));
#endif
return threads;
} else {
return ParallelGCThreads;
}
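Plugging the comment's numbers into a standalone version of the rule reproduces the figures: 8 + (72 - 8) * 5 / 8 == 48 workers on 64-bit, and a cap of 2 * 8 == 16 on 32-bit. Function and parameter names below are invented for the sketch:

#include <cstdio>

// num/den is the chosen fraction (5/8 in the comment above); switch_pt is
// the CPU count where scaling starts.
unsigned int scaled_workers(unsigned int ncpus, unsigned int num,
                            unsigned int den, unsigned int switch_pt,
                            bool is_32bit) {
  unsigned int threads = (ncpus <= switch_pt)
      ? ncpus
      : (switch_pt + ((ncpus - switch_pt) * num) / den);
  if (is_32bit) {
    // Address space, not CPU count, is the scarce resource on 32-bit.
    unsigned int cap = 2 * switch_pt;
    if (threads > cap) threads = cap;
  }
  return threads;
}

int main() {
  printf("%u\n", scaled_workers(72, 5, 8, 8, false)); // 8 + 64*5/8 == 48
  printf("%u\n", scaled_workers(72, 5, 8, 8, true));  // capped at 16
  return 0;
}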

View File

@@ -28,13 +28,144 @@
#include "runtime/atomic.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
STATIC_ASSERT(sizeof(BitMap::bm_word_t) == BytesPerWord); // "Implementation assumption."
BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
_map(NULL), _size(0)
{
resize(size_in_bits, in_resource_area);
typedef BitMap::bm_word_t bm_word_t;
typedef BitMap::idx_t idx_t;
class ResourceBitMapAllocator : StackObj {
public:
bm_word_t* allocate(idx_t size_in_words) const {
return NEW_RESOURCE_ARRAY(bm_word_t, size_in_words);
}
void free(bm_word_t* map, idx_t size_in_words) const {
// Don't free resource allocated arrays.
}
};
class CHeapBitMapAllocator : StackObj {
public:
bm_word_t* allocate(size_t size_in_words) const {
return ArrayAllocator<bm_word_t, mtInternal>::allocate(size_in_words);
}
void free(bm_word_t* map, idx_t size_in_words) const {
ArrayAllocator<bm_word_t, mtInternal>::free(map, size_in_words);
}
};
class ArenaBitMapAllocator : StackObj {
Arena* _arena;
public:
ArenaBitMapAllocator(Arena* arena) : _arena(arena) {}
bm_word_t* allocate(idx_t size_in_words) const {
return (bm_word_t*)_arena->Amalloc(size_in_words * BytesPerWord);
}
void free(bm_word_t* map, idx_t size_in_words) const {
// ArenaBitMaps currently don't free memory.
}
};
template <class Allocator>
BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old_map, idx_t old_size_in_bits, idx_t new_size_in_bits) {
size_t old_size_in_words = calc_size_in_words(old_size_in_bits);
size_t new_size_in_words = calc_size_in_words(new_size_in_bits);
bm_word_t* map = NULL;
if (new_size_in_words > 0) {
map = allocator.allocate(new_size_in_words);
Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) map,
MIN2(old_size_in_words, new_size_in_words));
if (new_size_in_words > old_size_in_words) {
clear_range_of_words(map, old_size_in_words, new_size_in_words);
}
}
if (old_map != NULL) {
allocator.free(old_map, old_size_in_words);
}
return map;
}
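The allocator classes above are compile-time policy objects: reallocate is instantiated once per allocator, so the dispatch costs nothing at run time. A reduced standalone sketch of the same copy-then-clear-extension contract, with malloc standing in for the HotSpot allocators:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

typedef unsigned long bm_word_t;

// Policy object in the style of CHeapBitMapAllocator, backed by malloc.
struct MallocAllocator {
  bm_word_t* allocate(size_t words) const {
    return (bm_word_t*)std::malloc(words * sizeof(bm_word_t));
  }
  void free(bm_word_t* map, size_t /*words*/) const { std::free(map); }
};

// Same contract as BitMap::reallocate above: copy the overlap, zero any
// newly visible words, release the old storage.
template <class Allocator>
bm_word_t* realloc_bits(const Allocator& a, bm_word_t* old_map,
                        size_t old_words, size_t new_words) {
  bm_word_t* map = NULL;
  if (new_words > 0) {
    map = a.allocate(new_words);
    size_t keep = old_words < new_words ? old_words : new_words;
    if (keep > 0) std::memcpy(map, old_map, keep * sizeof(bm_word_t));
    if (new_words > old_words)
      std::memset(map + old_words, 0,
                  (new_words - old_words) * sizeof(bm_word_t));
  }
  if (old_map != NULL) a.free(old_map, old_words);
  return map;
}

int main() {
  MallocAllocator a;
  bm_word_t* m = realloc_bits(a, (bm_word_t*)NULL, 0, 4); // fresh and zeroed
  m[0] = 0xff;
  m = realloc_bits(a, m, 4, 8);                           // grown, tail zeroed
  assert(m[0] == 0xff && m[7] == 0);
  a.free(m, 8);
  return 0;
}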
template <class Allocator>
bm_word_t* BitMap::allocate(const Allocator& allocator, idx_t size_in_bits) {
// Reuse reallocate to ensure that the new memory is cleared.
return reallocate(allocator, NULL, 0, size_in_bits);
}
template <class Allocator>
void BitMap::free(const Allocator& allocator, bm_word_t* map, idx_t size_in_bits) {
bm_word_t* ret = reallocate(allocator, map, size_in_bits, 0);
assert(ret == NULL, "Reallocate shouldn't have allocated");
}
template <class Allocator>
void BitMap::resize(const Allocator& allocator, idx_t new_size_in_bits) {
bm_word_t* new_map = reallocate(allocator, map(), size(), new_size_in_bits);
update(new_map, new_size_in_bits);
}
template <class Allocator>
void BitMap::initialize(const Allocator& allocator, idx_t size_in_bits) {
assert(map() == NULL, "precondition");
assert(size() == 0, "precondition");
resize(allocator, size_in_bits);
}
template <class Allocator>
void BitMap::reinitialize(const Allocator& allocator, idx_t new_size_in_bits) {
// Remove previous bits.
resize(allocator, 0);
initialize(allocator, new_size_in_bits);
}
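The two differ only in their precondition: initialize insists the bitmap has never been resized, while reinitialize first drops the old bits via resize(allocator, 0). A toy illustration (ToyBitMap and its placeholder storage are invented for the sketch):

#include <cassert>
#include <cstddef>

struct ToyBitMap {
  void*  map;
  size_t size;

  void resize(size_t new_size) {        // stands in for the template above
    static char storage[8];             // placeholder for real reallocation
    map  = new_size ? storage : NULL;
    size = new_size;
  }
  void initialize(size_t s) {
    assert(map == NULL && size == 0);   // precondition: never resized
    resize(s);
  }
  void reinitialize(size_t s) {         // legal on a live bitmap:
    resize(0);                          // drop the old bits first,
    initialize(s);                      // then start from scratch
  }
};

int main() {
  ToyBitMap b = { NULL, 0 };
  b.initialize(128);
  b.reinitialize(256);  // calling initialize() here instead would assert
  return 0;
}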
ResourceBitMap::ResourceBitMap(idx_t size_in_bits)
: BitMap(allocate(ResourceBitMapAllocator(), size_in_bits), size_in_bits) {
}
void ResourceBitMap::resize(idx_t new_size_in_bits) {
BitMap::resize(ResourceBitMapAllocator(), new_size_in_bits);
}
void ResourceBitMap::initialize(idx_t size_in_bits) {
BitMap::initialize(ResourceBitMapAllocator(), size_in_bits);
}
void ResourceBitMap::reinitialize(idx_t size_in_bits) {
BitMap::reinitialize(ResourceBitMapAllocator(), size_in_bits);
}
ArenaBitMap::ArenaBitMap(Arena* arena, idx_t size_in_bits)
: BitMap(allocate(ArenaBitMapAllocator(arena), size_in_bits), size_in_bits) {
}
CHeapBitMap::CHeapBitMap(idx_t size_in_bits)
: BitMap(allocate(CHeapBitMapAllocator(), size_in_bits), size_in_bits) {
}
CHeapBitMap::~CHeapBitMap() {
free(CHeapBitMapAllocator(), map(), size());
}
void CHeapBitMap::resize(idx_t new_size_in_bits) {
BitMap::resize(CHeapBitMapAllocator(), new_size_in_bits);
}
void CHeapBitMap::initialize(idx_t size_in_bits) {
BitMap::initialize(CHeapBitMapAllocator(), size_in_bits);
}
void CHeapBitMap::reinitialize(idx_t size_in_bits) {
BitMap::reinitialize(CHeapBitMapAllocator(), size_in_bits);
}
#ifdef ASSERT
@@ -49,25 +180,6 @@ void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
}
#endif // #ifdef ASSERT
void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
idx_t old_size_in_words = size_in_words();
bm_word_t* old_map = map();
_size = size_in_bits;
idx_t new_size_in_words = size_in_words();
if (in_resource_area) {
_map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
MIN2(old_size_in_words, new_size_in_words));
} else {
_map = ArrayAllocator<bm_word_t, mtInternal>::reallocate(old_map, old_size_in_words, new_size_in_words);
}
if (new_size_in_words > old_size_in_words) {
clear_range_of_words(old_size_in_words, new_size_in_words);
}
}
void BitMap::pretouch() {
os::pretouch_memory(word_addr(0), word_addr(size()));
}
@@ -205,13 +317,6 @@ bool BitMap::par_at_put(idx_t bit, bool value) {
return value ? par_set_bit(bit) : par_clear_bit(bit);
}
void BitMap::at_put_grow(idx_t offset, bool value) {
if (offset >= size()) {
resize(2 * MAX2(size(), offset));
}
at_put(offset, value);
}
void BitMap::at_put_range(idx_t start_offset, idx_t end_offset, bool value) {
if (value) {
set_range(start_offset, end_offset);
@@ -532,93 +637,116 @@ void BitMap::print_on(outputStream* st) const {
class TestBitMap : public AllStatic {
const static BitMap::idx_t BITMAP_SIZE = 1024;
static void fillBitMap(BitMap& map) {
template <class ResizableBitMapClass>
static void fillBitMap(ResizableBitMapClass& map) {
map.set_bit(1);
map.set_bit(3);
map.set_bit(17);
map.set_bit(512);
}
static void testResize(bool in_resource_area) {
{
BitMap map(0, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);
BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
{
BitMap map(128, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);
BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
{
BitMap map(BITMAP_SIZE, in_resource_area);
map.resize(BITMAP_SIZE, in_resource_area);
fillBitMap(map);
BitMap map2(BITMAP_SIZE, in_resource_area);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
}
static void testResizeResource() {
template <class ResizableBitMapClass>
static void testResize(BitMap::idx_t start_size) {
ResourceMark rm;
testResize(true);
ResizableBitMapClass map(start_size);
map.resize(BITMAP_SIZE);
fillBitMap(map);
ResizableBitMapClass map2(BITMAP_SIZE);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
static void testResizeNonResource() {
const size_t bitmap_bytes = BITMAP_SIZE / BitsPerByte;
template <class ResizableBitMapClass>
static void testResizeGrow() {
testResize<ResizableBitMapClass>(0);
testResize<ResizableBitMapClass>(128);
}
// Test the default behavior
testResize(false);
template <class ResizableBitMapClass>
static void testResizeSame() {
testResize<ResizableBitMapClass>(BITMAP_SIZE);
}
{
// Make sure that AllocatorMallocLimit is larger than our allocation request
// forcing it to call standard malloc()
SizeTFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes * 4);
testResize(false);
}
{
// Make sure that AllocatorMallocLimit is smaller than our allocation request
// forcing it to call mmap() (or equivalent)
SizeTFlagSetting fs(ArrayAllocatorMallocLimit, bitmap_bytes / 4);
testResize(false);
}
template <class ResizableBitMapClass>
static void testResizeShrink() {
testResize<ResizableBitMapClass>(BITMAP_SIZE * 2);
}
static void testResizeGrow() {
testResizeGrow<ResourceBitMap>();
testResizeGrow<CHeapBitMap>();
}
static void testResizeSame() {
testResizeSame<ResourceBitMap>();
testResizeSame<CHeapBitMap>();
}
static void testResizeShrink() {
testResizeShrink<ResourceBitMap>();
testResizeShrink<CHeapBitMap>();
}
static void testResize() {
testResizeGrow();
testResizeSame();
testResizeShrink();
}
template <class InitializableBitMapClass>
static void testInitialize() {
ResourceMark rm;
InitializableBitMapClass map;
map.initialize(BITMAP_SIZE);
fillBitMap(map);
InitializableBitMapClass map2(BITMAP_SIZE);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
static void testInitialize() {
testInitialize<ResourceBitMap>();
testInitialize<CHeapBitMap>();
}
template <class ReinitializableBitMapClass>
static void testReinitialize(BitMap::idx_t init_size) {
ResourceMark rm;
ReinitializableBitMapClass map(init_size);
map.reinitialize(BITMAP_SIZE);
fillBitMap(map);
ReinitializableBitMapClass map2(BITMAP_SIZE);
fillBitMap(map2);
assert(map.is_same(map2), "could be");
}
template <class ReinitializableBitMapClass>
static void testReinitialize() {
testReinitialize<ReinitializableBitMapClass>(0);
testReinitialize<ReinitializableBitMapClass>(128);
testReinitialize<ReinitializableBitMapClass>(BITMAP_SIZE);
}
static void testReinitialize() {
testReinitialize<ResourceBitMap>();
}
public:
static void test() {
testResizeResource();
testResizeNonResource();
testResize();
testInitialize();
testReinitialize();
}
};
void TestBitMap_test() {
TestBitMap::test();
}
#endif
BitMap2D::BitMap2D(bm_word_t* map, idx_t size_in_slots, idx_t bits_per_slot)
: _bits_per_slot(bits_per_slot)
, _map(map, size_in_slots * bits_per_slot)
{
}
BitMap2D::BitMap2D(idx_t size_in_slots, idx_t bits_per_slot)
: _bits_per_slot(bits_per_slot)
, _map(size_in_slots * bits_per_slot)
{
}

View File

@@ -33,6 +33,16 @@ class BitMapClosure;
// Operations for bitmaps represented as arrays of unsigned integers.
// Bit offsets are numbered from 0 to size-1.
// The "abstract" base BitMap class.
//
// The constructor and destructor are protected to prevent
// creation of BitMap instances outside of the BitMap class.
//
// The BitMap class doesn't use virtual calls on purpose;
// this ensures that we don't get a vtable unnecessarily.
//
// The allocation of the backing storage for the BitMap is handled by
// the subclasses. BitMap doesn't allocate or delete backing storage.
class BitMap VALUE_OBJ_CLASS_SPEC {
friend class BitMap2D;
@@ -50,10 +60,6 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
bm_word_t* _map; // First word in bitmap
idx_t _size; // Size of bitmap (in bits)
// Puts the given value at the given offset, using resize() to size
// the bitmap appropriately if needed using factor-of-two expansion.
void at_put_grow(idx_t index, bool value);
protected:
// Return the position of bit within the word that contains it (e.g., if
// bitmap words are 32 bits, return a number 0 <= n <= 31).
@@ -97,6 +103,8 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
void set_large_range_of_words (idx_t beg, idx_t end);
void clear_large_range_of_words (idx_t beg, idx_t end);
static void clear_range_of_words(bm_word_t* map, idx_t beg, idx_t end);
// The index of the first full word in a range.
idx_t word_index_round_up(idx_t bit) const;
@@ -110,46 +118,69 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
static idx_t num_set_bits(bm_word_t w);
static idx_t num_set_bits_from_table(unsigned char c);
public:
// Allocation Helpers.
// Constructs a bitmap with no map, and size 0.
BitMap() : _map(NULL), _size(0) {}
// Allocates and clears the bitmap memory.
template <class Allocator>
static bm_word_t* allocate(const Allocator&, idx_t size_in_bits);
// Constructs a bitmap with the given map and size.
BitMap(bm_word_t* map, idx_t size_in_bits) :_map(map), _size(size_in_bits) {}
// Reallocates and clears the new bitmap memory.
template <class Allocator>
static bm_word_t* reallocate(const Allocator&, bm_word_t* map, idx_t old_size_in_bits, idx_t new_size_in_bits);
// Constructs an empty bitmap of the given size (that is, this clears the
// new bitmap). Allocates the map array in resource area if
// "in_resource_area" is true, else in the C heap.
BitMap(idx_t size_in_bits, bool in_resource_area = true);
// Free the bitmap memory.
template <class Allocator>
static void free(const Allocator&, bm_word_t* map, idx_t size_in_bits);
// Protected functions that are used by BitMap sub-classes that support them.
// Resize the backing bitmap memory.
//
// Old bits are transferred to the new memory
// and the extended memory is cleared.
template <class Allocator>
void resize(const Allocator& allocator, idx_t new_size_in_bits);
// Set up and clear the bitmap memory.
//
// Precondition: The bitmap was default constructed and has
// not yet had memory allocated via resize or (re)initialize.
template <class Allocator>
void initialize(const Allocator& allocator, idx_t size_in_bits);
// Set up and clear the bitmap memory.
//
// Can be called on previously initialized bitmaps.
template <class Allocator>
void reinitialize(const Allocator& allocator, idx_t new_size_in_bits);
// Set the map and size.
void set_map(bm_word_t* map) { _map = map; }
void set_size(idx_t size_in_bits) { _size = size_in_bits; }
void update(bm_word_t* map, idx_t size) {
_map = map;
_size = size;
}
// Allocates necessary data structure, either in the resource area
// or in the C heap, as indicated by "in_resource_area."
// Preserves state currently in bit map by copying data.
// Zeros any newly-addressable bits.
// If "in_resource_area" is false, frees the current map.
// (Note that this assumes that all calls to "resize" on the same BitMap
// use the same value for "in_resource_area".)
void resize(idx_t size_in_bits, bool in_resource_area = true);
// Protected constructor and destructor.
BitMap(bm_word_t* map, idx_t size_in_bits) : _map(map), _size(size_in_bits) {}
~BitMap() {}
public:
// Pretouch the entire range of memory this BitMap covers.
void pretouch();
// Accessing
idx_t size() const { return _size; }
idx_t size_in_bytes() const { return size_in_words() * BytesPerWord; }
idx_t size_in_words() const {
return calc_size_in_words(size());
}
static idx_t calc_size_in_words(size_t size_in_bits) {
return word_index(size_in_bits + BitsPerWord - 1);
}
static idx_t calc_size_in_bytes(size_t size_in_bits) {
return calc_size_in_words(size_in_bits) * BytesPerWord;
}
idx_t size() const { return _size; }
idx_t size_in_words() const { return calc_size_in_words(size()); }
idx_t size_in_bytes() const { return calc_size_in_bytes(size()); }
bool at(idx_t index) const {
verify_index(index);
return (*word_addr(index) & bit_mask(index)) != 0;
@@ -279,6 +310,88 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
#endif
};
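The class comment above describes a common C++ discipline: a non-virtual base with protected constructor and destructor carries all the behavior, while only subclasses, each owning a storage policy, can be created or destroyed. A standalone sketch with invented names:

#include <cstddef>

class BitsBase {
 protected:
  unsigned* _map;
  size_t    _size;
  BitsBase(unsigned* map, size_t size_in_bits) : _map(map), _size(size_in_bits) {}
  ~BitsBase() {}                  // non-virtual: no vtable, and instances
                                  // are never deleted through base pointers
 public:
  size_t size() const { return _size; }
};

class StackBits : public BitsBase {
  unsigned _storage[4];           // this subclass's storage policy
 public:
  StackBits() : BitsBase(_storage, 4 * sizeof(unsigned) * 8) {
    for (int i = 0; i < 4; i++) _storage[i] = 0;
  }
};

int main() {
  StackBits bits;                 // fine: the subclass is constructible
  // BitsBase base(NULL, 0);      // would not compile: protected constructor
  return (int)(bits.size() - 128);
}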
// A concrete implementation of the "abstract" BitMap class.
//
// The BitMapView is used when the backing storage is managed externally.
class BitMapView : public BitMap {
public:
BitMapView() : BitMap(NULL, 0) {}
BitMapView(bm_word_t* map, idx_t size_in_bits) : BitMap(map, size_in_bits) {}
};
// A BitMap with storage in a ResourceArea.
class ResourceBitMap : public BitMap {
friend class TestBitMap;
public:
ResourceBitMap() : BitMap(NULL, 0) {}
// Clears the bitmap memory.
ResourceBitMap(idx_t size_in_bits);
// Resize the backing bitmap memory.
//
// Old bits are transferred to the new memory
// and the extended memory is cleared.
void resize(idx_t new_size_in_bits);
// Set up and clear the bitmap memory.
//
// Precondition: The bitmap was default constructed and has
// not yet had memory allocated via resize or initialize.
void initialize(idx_t size_in_bits);
// Set up and clear the bitmap memory.
//
// Can be called on previously initialized bitmaps.
void reinitialize(idx_t size_in_bits);
};
// A BitMap with storage in a specific Arena.
class ArenaBitMap : public BitMap {
public:
// Clears the bitmap memory.
ArenaBitMap(Arena* arena, idx_t size_in_bits);
private:
// Don't allow copy or assignment.
ArenaBitMap(const ArenaBitMap&);
ArenaBitMap& operator=(const ArenaBitMap&);
};
// A BitMap with storage in the CHeap.
class CHeapBitMap : public BitMap {
friend class TestBitMap;
private:
// Don't allow copy or assignment, to prevent the
// allocated memory from leaking out to other instances.
CHeapBitMap(const CHeapBitMap&);
CHeapBitMap& operator=(const CHeapBitMap&);
public:
CHeapBitMap() : BitMap(NULL, 0) {}
// Clears the bitmap memory.
CHeapBitMap(idx_t size_in_bits);
~CHeapBitMap();
// Resize the backing bitmap memory.
//
// Old bits are transferred to the new memory
// and the extended memory is cleared.
void resize(idx_t new_size_in_bits);
// Set up and clear the bitmap memory.
//
// Precondition: The bitmap was default constructed and has
// not yet had memory allocated via resize or initialize.
void initialize(idx_t size_in_bits);
// Set up and clear the bitmap memory.
//
// Can be called on previously initialized bitmaps.
void reinitialize(idx_t size_in_bits);
};
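CHeapBitMap is the one owning variant: its destructor frees the map, so the private, never-defined copy constructor and assignment operator (the pre-C++11 suppression idiom used here) turn accidental copies into compile- or link-time errors. Sketched standalone with invented names:

#include <cstddef>
#include <cstdlib>

class OwningBits {
  unsigned* _map;
  size_t    _words;
  // Declared private and never defined, as in CHeapBitMap above.
  OwningBits(const OwningBits&);
  OwningBits& operator=(const OwningBits&);
 public:
  explicit OwningBits(size_t words)
    : _map((unsigned*)std::calloc(words, sizeof(unsigned))), _words(words) {}
  ~OwningBits() { std::free(_map); }   // owns its storage
  size_t words() const { return _words; }
};

int main() {
  OwningBits bits(8);        // freed automatically at end of scope
  // OwningBits copy(bits);  // would not compile: copying is suppressed
  return (int)(bits.words() - 8);
}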
// Convenience class wrapping BitMap which provides multiple bits per slot.
class BitMap2D VALUE_OBJ_CLASS_SPEC {
public:
@@ -286,8 +399,8 @@ class BitMap2D VALUE_OBJ_CLASS_SPEC {
typedef BitMap::bm_word_t bm_word_t; // Element type of array that
// represents the bitmap.
private:
BitMap _map;
idx_t _bits_per_slot;
ResourceBitMap _map;
idx_t _bits_per_slot;
idx_t bit_index(idx_t slot_index, idx_t bit_within_slot_index) const {
return slot_index * _bits_per_slot + bit_within_slot_index;
@@ -299,10 +412,12 @@
public:
// Construction. bits_per_slot must be greater than 0.
BitMap2D(bm_word_t* map, idx_t size_in_slots, idx_t bits_per_slot);
BitMap2D(idx_t bits_per_slot) :
_map(), _bits_per_slot(bits_per_slot) {}
// Allocates necessary data structure in resource area. bits_per_slot must be greater than 0.
BitMap2D(idx_t size_in_slots, idx_t bits_per_slot);
BitMap2D(idx_t size_in_slots, idx_t bits_per_slot) :
_map(size_in_slots * bits_per_slot), _bits_per_slot(bits_per_slot) {}
idx_t size_in_bits() {
return _map.size();

View File

@@ -121,18 +121,18 @@ inline void BitMap::set_range_of_words(idx_t beg, idx_t end) {
for (idx_t i = beg; i < end; ++i) map[i] = ~(bm_word_t)0;
}
inline void BitMap::clear_range_of_words(idx_t beg, idx_t end) {
bm_word_t* map = _map;
inline void BitMap::clear_range_of_words(bm_word_t* map, idx_t beg, idx_t end) {
for (idx_t i = beg; i < end; ++i) map[i] = 0;
}
inline void BitMap::clear_range_of_words(idx_t beg, idx_t end) {
clear_range_of_words(_map, beg, end);
}
inline void BitMap::clear() {
clear_range_of_words(0, size_in_words());
}
inline void BitMap::par_clear_range(idx_t beg, idx_t end, RangeSizeHint hint) {
if (hint == small_range && end - beg == 1) {
par_at_put(beg, false);
@@ -359,7 +359,12 @@ inline void BitMap2D::at_put(idx_t slot_index, idx_t bit_within_slot_index, bool
inline void BitMap2D::at_put_grow(idx_t slot_index, idx_t bit_within_slot_index, bool value) {
verify_bit_within_slot_index(bit_within_slot_index);
_map.at_put_grow(bit_index(slot_index, bit_within_slot_index), value);
idx_t bit = bit_index(slot_index, bit_within_slot_index);
if (bit >= _map.size()) {
_map.resize(2 * MAX2(_map.size(), bit));
}
_map.at_put(bit, value);
}
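The doubling rule keeps growth amortized-cheap: a write past the end at least doubles the map, or jumps straight past the touched bit when that is larger, mirroring the factor-of-two expansion the old BitMap::at_put_grow used. A self-contained sketch (GrowBits is invented for the example):

#include <cassert>
#include <cstddef>

struct GrowBits {
  bool*  bits;
  size_t size;

  GrowBits() : bits(NULL), size(0) {}
  ~GrowBits() { delete[] bits; }

  void resize(size_t n) {
    bool* grown = new bool[n]();                   // zero-initialized
    for (size_t i = 0; i < size && i < n; i++) grown[i] = bits[i];
    delete[] bits;
    bits = grown;
    size = n;
  }
  // Same rule as BitMap2D::at_put_grow above. (As in the original, a
  // first write to bit 0 assumes the map already has some size.)
  void at_put_grow(size_t bit, bool value) {
    if (bit >= size) {
      resize(2 * (size > bit ? size : bit));       // factor-of-two expansion
    }
    bits[bit] = value;
  }
};

int main() {
  GrowBits g;
  g.at_put_grow(5, true);            // grows 0 -> 10, then writes bit 5
  assert(g.size == 10 && g.bits[5]);
  return 0;
}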
inline void BitMap2D::clear() {

View File

@@ -25,6 +25,7 @@
* @test TestShrinkAuxiliaryData00
* @bug 8038423 8061715
* @summary Checks that decommitment occurs for JVM with different
* @ignore 8155957
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib

View File

@@ -26,6 +26,7 @@
* @bug 8038423 8061715 8078405
* @summary Checks that decommitment occurs for JVM with different
* G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
* @ignore 8155957
* @requires vm.gc=="G1" | vm.gc=="null"
* @requires vm.opt.AggressiveOpts=="false" | vm.opt.AggressiveOpts=="null"
* @library /testlibrary /test/lib