8205683: Refactor heap allocation to separate concerns

Reviewed-by: pliden, kbarrett
Erik Österlund 2018-06-28 14:22:28 +02:00
parent 7f3a801cae
commit e4dbe9909f
22 changed files with 638 additions and 528 deletions


@@ -1265,10 +1265,10 @@ int java_lang_Class::oop_size(oop java_class) {
return size;
}
void java_lang_Class::set_oop_size(oop java_class, int size) {
void java_lang_Class::set_oop_size(HeapWord* java_class, int size) {
assert(_oop_size_offset != 0, "must be set");
assert(size > 0, "Oop size must be greater than zero, not %d", size);
java_class->int_field_put(_oop_size_offset, size);
*(int*)(((char*)java_class) + _oop_size_offset) = size;
}
int java_lang_Class::static_oop_field_count(oop java_class) {


@@ -270,7 +270,7 @@ class java_lang_Class : AllStatic {
static oop module(oop java_class);
static int oop_size(oop java_class);
static void set_oop_size(oop java_class, int size);
static void set_oop_size(HeapWord* java_class, int size);
static int static_oop_field_count(oop java_class);
static void set_static_oop_field_count(oop java_class, int size);

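Note: the new HeapWord* overload exists because the mirror's size field is now written while the allocation is still raw memory, before its Klass* has been installed, so it cannot yet be treated as an oop. A hedged sketch of the allocation-time call (mirroring ClassAllocator::initialize later in this commit; mem and word_size are placeholders):

// 'mem' is freshly allocated, zeroed mirror storage. It must not be used as
// an oop yet: the Klass* is only published afterwards by finish(mem).
java_lang_Class::set_oop_size(mem, (int)word_size); // raw int write at _oop_size_offset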

@@ -33,6 +33,7 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
@@ -46,6 +47,7 @@
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
class ClassLoaderData;
@@ -327,15 +329,6 @@ MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa
}
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
if (CheckMemoryInitialization && ZapUnusedHeapArea) {
for (size_t slot = 0; slot < size; slot += 1) {
assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
"Found badHeapWordValue in post-allocation check");
}
}
}
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
if (CheckMemoryInitialization && ZapUnusedHeapArea) {
for (size_t slot = 0; slot < size; slot += 1) {
@@ -346,118 +339,6 @@ void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t siz
}
#endif // PRODUCT
#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
Thread *thread = Thread::current();
// How to choose between a pending exception and a potential
// OutOfMemoryError? Don't allow pending exceptions.
// This is a VM policy failure, so how do we exhaustively test it?
assert(!thread->has_pending_exception(),
"shouldn't be allocating with pending exception");
if (StrictSafepointChecks) {
assert(thread->allow_allocation(),
"Allocation done by thread for which allocation is blocked "
"by No_Allocation_Verifier!");
// Allocation of an oop can always invoke a safepoint,
// hence, the true argument
thread->check_for_valid_safepoint_state(true);
}
}
#endif
HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
bool* gc_overhead_limit_was_exceeded, TRAPS) {
if (UseTLAB) {
HeapWord* result = allocate_from_tlab(klass, size, THREAD);
if (result != NULL) {
return result;
}
}
return allocate_outside_tlab(klass, size, gc_overhead_limit_was_exceeded, THREAD);
}
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
HeapWord* obj = NULL;
// In assertion mode, check that there was a sampling collector present
// in the stack. This enforces checking that no path is without a sampling
// collector.
// Only check if the sampler could actually sample something in this call path.
assert(!JvmtiExport::should_post_sampled_object_alloc()
|| !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample()
|| THREAD->heap_sampler().sampling_collector_present(),
"Sampling collector not present.");
if (ThreadHeapSampler::enabled()) {
// Try to allocate the sampled object from TLAB, it is possible a sample
// point was put and the TLAB still has space.
obj = THREAD->tlab().allocate_sampled_object(size);
if (obj != NULL) {
return obj;
}
}
ThreadLocalAllocBuffer& tlab = THREAD->tlab();
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
if (tlab.free() > tlab.refill_waste_limit()) {
tlab.record_slow_allocation(size);
return NULL;
}
// Discard tlab and allocate a new one.
// To minimize fragmentation, the last TLAB may be smaller than the rest.
size_t new_tlab_size = tlab.compute_size(size);
tlab.clear_before_allocation();
if (new_tlab_size == 0) {
return NULL;
}
// Allocate a new TLAB requesting new_tlab_size. Any size
// between minimal and new_tlab_size is accepted.
size_t actual_tlab_size = 0;
size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
if (obj == NULL) {
assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
min_tlab_size, new_tlab_size, actual_tlab_size);
return NULL;
}
assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
p2i(obj), min_tlab_size, new_tlab_size);
AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
if (ZeroTLAB) {
// ..and clear it.
Copy::zero_to_words(obj, actual_tlab_size);
} else {
// ...and zap just allocated object.
#ifdef ASSERT
// Skip mangling the space corresponding to the object header to
// ensure that the returned space is not considered parsable by
// any concurrent GC thread.
size_t hdr_size = oopDesc::header_size();
Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
}
// Send the thread information about this allocation in case a sample is
// requested.
if (ThreadHeapSampler::enabled()) {
size_t tlab_bytes_since_last_sample = THREAD->tlab().bytes_since_last_sample_point();
THREAD->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
}
tlab.fill(obj, obj + size, actual_tlab_size);
return obj;
}
size_t CollectedHeap::max_tlab_size() const {
// TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
// This restriction could be removed by enabling filling with multiple arrays.
@@ -509,9 +390,8 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
const size_t len = payload_size * HeapWordSize / sizeof(jint);
assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);
// Set the length first for concurrent GC.
((arrayOop)start)->set_length((int)len);
post_allocation_setup_common(Universe::intArrayKlassObj(), start);
ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
allocator.initialize(start);
DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
@@ -524,7 +404,8 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
fill_with_array(start, words, zap);
} else if (words > 0) {
assert(words == min_fill_size(), "unaligned size");
post_allocation_setup_common(SystemDictionary::Object_klass(), start);
ObjAllocator allocator(SystemDictionary::Object_klass(), words);
allocator.initialize(start);
}
}
@@ -566,6 +447,21 @@ HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
return NULL;
}
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
ObjAllocator allocator(klass, size, THREAD);
return allocator.allocate();
}
oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
return allocator.allocate();
}
oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
ClassAllocator allocator(klass, size, THREAD);
return allocator.allocate();
}
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
// The second disjunct in the assertion below makes a concession
// for the start-up verification done while the VM is being

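A hedged call-site sketch of the three new virtual entry points (the same pattern the InstanceKlass, InstanceMirrorKlass, ArrayKlass and TypeArrayKlass changes below adopt; klass, size and length are placeholders):

// Each call constructs the matching allocator and runs its allocate() path.
oop obj    = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
oop mirror = Universe::heap()->class_allocate(klass, size, CHECK_NULL);
oop array  = Universe::heap()->array_allocate(klass, size, length, /* do_zero */ true, CHECK_NULL);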

@@ -95,6 +95,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class IsGCActiveMark; // Block structured external access to _is_gc_active
friend class MemAllocator;
private:
#ifdef ASSERT
@@ -141,13 +142,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Reinitialize tlabs before resuming mutators.
virtual void resize_all_tlabs();
// Allocate from the current thread's TLAB, with broken-out slow path.
inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
inline static HeapWord* allocate_outside_tlab(Klass* klass, size_t size,
bool* gc_overhead_limit_was_exceeded, TRAPS);
// Raw memory allocation facilities
// The obj and array allocate methods are covers for these methods.
// mem_allocate() should never be
@@ -155,29 +149,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
virtual HeapWord* mem_allocate(size_t size,
bool* gc_overhead_limit_was_exceeded) = 0;
// Allocate an uninitialized block of the given size, or returns NULL if
// this is impossible.
inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS);
// Like allocate_init, but the block returned by a successful allocation
// is guaranteed initialized to zeros.
inline static HeapWord* common_mem_allocate_init(Klass* klass, size_t size, TRAPS);
// Helper functions for (VM) allocation.
inline static void post_allocation_setup_common(Klass* klass, HeapWord* obj);
inline static void post_allocation_setup_no_klass_install(Klass* klass,
HeapWord* objPtr);
inline static void post_allocation_setup_obj(Klass* klass, HeapWord* obj, int size);
inline static void post_allocation_setup_array(Klass* klass,
HeapWord* obj, int length);
inline static void post_allocation_setup_class(Klass* klass, HeapWord* obj, int size);
// Clears an allocated object.
inline static void init_obj(HeapWord* obj, size_t size);
// Filler object utilities.
static inline size_t filler_array_hdr_size();
static inline size_t filler_array_min_size();
@@ -194,21 +165,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
// Internal allocation methods.
inline static HeapWord* common_allocate_memory(Klass* klass, int size,
void (*post_setup)(Klass*, HeapWord*, int),
int size_for_post, bool init_memory,
TRAPS);
// Internal allocation method for common obj/class/array allocations.
inline static HeapWord* allocate_memory(Klass* klass, int size,
void (*post_setup)(Klass*, HeapWord*, int),
int size_for_post, bool init_memory,
TRAPS);
// Verification functions
virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
PRODUCT_RETURN;
virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
PRODUCT_RETURN;
debug_only(static void check_for_valid_allocation_state();)
@@ -328,18 +285,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
}
GCCause::Cause gc_cause() { return _gc_cause; }
// General obj/array allocation facilities.
inline static oop obj_allocate(Klass* klass, int size, TRAPS);
inline static oop array_allocate(Klass* klass, int size, int length, TRAPS);
inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
inline static oop class_allocate(Klass* klass, int size, TRAPS);
// Raw memory allocation. This may or may not use TLAB allocations to satisfy the
// allocation. A GC implementation may override this function to satisfy the allocation
// in any way. But the default is to try a TLAB allocation, and otherwise perform
// mem_allocate.
virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
bool* gc_overhead_limit_was_exceeded, TRAPS);
virtual oop obj_allocate(Klass* klass, int size, TRAPS);
virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
virtual oop class_allocate(Klass* klass, int size, TRAPS);
// Utilities for turning raw memory into filler objects.
//


@@ -25,297 +25,9 @@
#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
// Inline allocation implementations.
void CollectedHeap::post_allocation_setup_common(Klass* klass,
HeapWord* obj_ptr) {
post_allocation_setup_no_klass_install(klass, obj_ptr);
oop obj = (oop)obj_ptr;
#if (INCLUDE_G1GC || INCLUDE_CMSGC)
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
// concurrent collectors.
obj->release_set_klass(klass);
#else
obj->set_klass(klass);
#endif
}
void CollectedHeap::post_allocation_setup_no_klass_install(Klass* klass,
HeapWord* obj_ptr) {
oop obj = (oop)obj_ptr;
assert(obj != NULL, "NULL object pointer");
if (UseBiasedLocking && (klass != NULL)) {
obj->set_mark_raw(klass->prototype_header());
} else {
// May be bootstrapping
obj->set_mark_raw(markOopDesc::prototype());
}
}
// Support for jvmti and dtrace
inline void post_allocation_notify(Klass* klass, oop obj, int size) {
// support low memory notifications (no-op if not enabled)
LowMemoryDetector::detect_low_memory_for_collected_pools();
// support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj);
if (DTraceAllocProbes) {
// support for Dtrace object alloc event (no-op most of the time)
if (klass != NULL && klass->name() != NULL) {
SharedRuntime::dtrace_object_alloc(obj, size);
}
}
}
void CollectedHeap::post_allocation_setup_obj(Klass* klass,
HeapWord* obj_ptr,
int size) {
post_allocation_setup_common(klass, obj_ptr);
oop obj = (oop)obj_ptr;
assert(Universe::is_bootstrapping() ||
!obj->is_array(), "must not be an array");
// notify jvmti and dtrace
post_allocation_notify(klass, obj, size);
}
void CollectedHeap::post_allocation_setup_class(Klass* klass,
HeapWord* obj_ptr,
int size) {
// Set oop_size field before setting the _klass field because a
// non-NULL _klass field indicates that the object is parsable by
// concurrent GC.
oop new_cls = (oop)obj_ptr;
assert(size > 0, "oop_size must be positive.");
java_lang_Class::set_oop_size(new_cls, size);
post_allocation_setup_common(klass, obj_ptr);
assert(Universe::is_bootstrapping() ||
!new_cls->is_array(), "must not be an array");
// notify jvmti and dtrace
post_allocation_notify(klass, new_cls, size);
}
void CollectedHeap::post_allocation_setup_array(Klass* klass,
HeapWord* obj_ptr,
int length) {
// Set array length before setting the _klass field because a
// non-NULL klass field indicates that the object is parsable by
// concurrent GC.
assert(length >= 0, "length should be non-negative");
((arrayOop)obj_ptr)->set_length(length);
post_allocation_setup_common(klass, obj_ptr);
oop new_obj = (oop)obj_ptr;
assert(new_obj->is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace)
post_allocation_notify(klass, new_obj, new_obj->size());
}
HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {
// Clear unhandled oops for memory allocation. Memory allocation might
// not take out a lock if from tlab, so clear here.
CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
if (HAS_PENDING_EXCEPTION) {
NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
return NULL; // caller does a CHECK_0 too
}
bool gc_overhead_limit_was_exceeded = false;
CollectedHeap* heap = Universe::heap();
HeapWord* result = heap->obj_allocate_raw(klass, size, &gc_overhead_limit_was_exceeded, THREAD);
if (result != NULL) {
return result;
}
if (!gc_overhead_limit_was_exceeded) {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory("Java heap space");
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
"Java heap space");
}
THROW_OOP_0(Universe::out_of_memory_error_java_heap());
} else {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory("GC overhead limit exceeded");
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
"GC overhead limit exceeded");
}
THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
}
}
HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
init_obj(obj, size);
return obj;
}
HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, size_t size, TRAPS) {
assert(UseTLAB, "should use UseTLAB");
HeapWord* obj = THREAD->tlab().allocate(size);
if (obj != NULL) {
return obj;
}
// Otherwise...
obj = allocate_from_tlab_slow(klass, size, THREAD);
assert(obj == NULL || !HAS_PENDING_EXCEPTION,
"Unexpected exception, will result in uninitialized storage");
return obj;
}
HeapWord* CollectedHeap::allocate_outside_tlab(Klass* klass, size_t size,
bool* gc_overhead_limit_was_exceeded, TRAPS) {
HeapWord* result = Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
if (result == NULL) {
return result;
}
NOT_PRODUCT(Universe::heap()->check_for_non_bad_heap_word_value(result, size));
assert(!HAS_PENDING_EXCEPTION,
"Unexpected exception, will result in uninitialized storage");
size_t size_in_bytes = size * HeapWordSize;
THREAD->incr_allocated_bytes(size_in_bytes);
AllocTracer::send_allocation_outside_tlab(klass, result, size_in_bytes, THREAD);
if (ThreadHeapSampler::enabled()) {
THREAD->heap_sampler().check_for_sampling(result, size_in_bytes);
}
return result;
}
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
assert(obj != NULL, "cannot initialize NULL object");
const size_t hs = oopDesc::header_size();
assert(size >= hs, "unexpected object size");
((oop)obj)->set_klass_gap(0);
Copy::fill_to_aligned_words(obj + hs, size - hs);
}
HeapWord* CollectedHeap::common_allocate_memory(Klass* klass, int size,
void (*post_setup)(Klass*, HeapWord*, int),
int size_for_post, bool init_memory,
TRAPS) {
HeapWord* obj;
if (init_memory) {
obj = common_mem_allocate_init(klass, size, CHECK_NULL);
} else {
obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
}
post_setup(klass, obj, size_for_post);
return obj;
}
HeapWord* CollectedHeap::allocate_memory(Klass* klass, int size,
void (*post_setup)(Klass*, HeapWord*, int),
int size_for_post, bool init_memory,
TRAPS) {
HeapWord* obj;
assert(JavaThread::current()->heap_sampler().add_sampling_collector(),
"Should never return false.");
if (JvmtiExport::should_post_sampled_object_alloc()) {
HandleMark hm(THREAD);
Handle obj_h;
{
JvmtiSampledObjectAllocEventCollector collector;
obj = common_allocate_memory(klass, size, post_setup, size_for_post,
init_memory, CHECK_NULL);
// If we want to be sampling, protect the allocated object with a Handle
// before doing the callback. The callback is done in the destructor of
// the JvmtiSampledObjectAllocEventCollector.
obj_h = Handle(THREAD, (oop) obj);
}
obj = (HeapWord*) obj_h();
} else {
obj = common_allocate_memory(klass, size, post_setup, size_for_post,
init_memory, CHECK_NULL);
}
assert(JavaThread::current()->heap_sampler().remove_sampling_collector(),
"Should never return false.");
return obj;
}
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_obj,
size, true, CHECK_NULL);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_class,
size, true, CHECK_NULL);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
oop CollectedHeap::array_allocate(Klass* klass,
int size,
int length,
TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_array,
length, true, CHECK_NULL);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj;
}
oop CollectedHeap::array_allocate_nozero(Klass* klass,
int size,
int length,
TRAPS) {
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_array,
length, false, CHECK_NULL);
#ifndef PRODUCT
const size_t hs = oopDesc::header_size()+1;
Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
return (oop)obj;
}
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
HeapWord* end,


@@ -275,9 +275,6 @@ HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
bool is_tlab,
bool* gc_overhead_limit_was_exceeded) {
debug_only(check_for_valid_allocation_state());
assert(no_gc_in_progress(), "Allocation during gc not allowed");
// In general gc_overhead_limit_was_exceeded should be false so
// set it so here and reset it to true only if the gc time
// limit is being exceeded as checked below.


@@ -0,0 +1,442 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
class MemAllocator::Allocation: StackObj {
friend class MemAllocator;
const MemAllocator& _allocator;
Thread* _thread;
oop* _obj_ptr;
bool _overhead_limit_exceeded;
bool _allocated_outside_tlab;
size_t _allocated_tlab_size;
bool _tlab_end_reset_for_sample;
bool check_out_of_memory();
void verify_before();
void verify_after();
void notify_allocation();
void notify_allocation_jvmti_allocation_event();
void notify_allocation_jvmti_sampler();
void notify_allocation_low_memory_detector();
void notify_allocation_jfr_sampler();
void notify_allocation_dtrace_sampler();
void check_for_bad_heap_word_value() const;
#ifdef ASSERT
void check_for_valid_allocation_state() const;
#endif
class PreserveObj;
public:
Allocation(const MemAllocator& allocator, oop* obj_ptr)
: _allocator(allocator),
_thread(Thread::current()),
_obj_ptr(obj_ptr),
_overhead_limit_exceeded(false),
_allocated_outside_tlab(false),
_allocated_tlab_size(0),
_tlab_end_reset_for_sample(false)
{
verify_before();
}
~Allocation() {
if (!check_out_of_memory()) {
verify_after();
notify_allocation();
}
}
oop obj() const { return *_obj_ptr; }
};
class MemAllocator::Allocation::PreserveObj: StackObj {
HandleMark _handle_mark;
Handle _handle;
oop* const _obj_ptr;
public:
PreserveObj(Thread* thread, oop* obj_ptr)
: _handle_mark(thread),
_handle(thread, *obj_ptr),
_obj_ptr(obj_ptr)
{
*obj_ptr = NULL;
}
~PreserveObj() {
*_obj_ptr = _handle();
}
oop operator()() const {
return _handle();
}
};
bool MemAllocator::Allocation::check_out_of_memory() {
Thread* THREAD = _thread;
assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");
if (obj() != NULL) {
return false;
}
if (!_overhead_limit_exceeded) {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory("Java heap space");
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
"Java heap space");
}
THROW_OOP_(Universe::out_of_memory_error_java_heap(), true);
} else {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory("GC overhead limit exceeded");
if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
"GC overhead limit exceeded");
}
THROW_OOP_(Universe::out_of_memory_error_gc_overhead_limit(), true);
}
}
void MemAllocator::Allocation::verify_before() {
// Clear unhandled oops for memory allocation. Memory allocation might
// not take out a lock if from tlab, so clear here.
Thread* THREAD = _thread;
CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
debug_only(check_for_valid_allocation_state());
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
}
void MemAllocator::Allocation::verify_after() {
NOT_PRODUCT(check_for_bad_heap_word_value();)
}
void MemAllocator::Allocation::check_for_bad_heap_word_value() const {
MemRegion obj_range = _allocator.obj_memory_range(obj());
HeapWord* addr = obj_range.start();
size_t size = obj_range.word_size();
if (CheckMemoryInitialization && ZapUnusedHeapArea) {
for (size_t slot = 0; slot < size; slot += 1) {
assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
"Found badHeapWordValue in post-allocation check");
}
}
}
#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
// How to choose between a pending exception and a potential
// OutOfMemoryError? Don't allow pending exceptions.
// This is a VM policy failure, so how do we exhaustively test it?
assert(!_thread->has_pending_exception(),
"shouldn't be allocating with pending exception");
if (StrictSafepointChecks) {
assert(_thread->allow_allocation(),
"Allocation done by thread for which allocation is blocked "
"by No_Allocation_Verifier!");
// Allocation of an oop can always invoke a safepoint,
// hence, the true argument
_thread->check_for_valid_safepoint_state(true);
}
}
#endif
void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
// support for JVMTI VMObjectAlloc event (no-op if not enabled)
JvmtiExport::vm_object_alloc_event_collector(obj());
if (!ThreadHeapSampler::enabled()) {
// Sampling disabled
return;
}
if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
// Sample if it's a non-TLAB allocation, or a TLAB allocation that either refills the TLAB
// or expands it due to taking a sampler induced slow path.
return;
}
assert(JavaThread::current()->heap_sampler().add_sampling_collector(),
"Should never return false.");
// Only check if the sampler could actually sample something in this path.
assert(!JvmtiExport::should_post_sampled_object_alloc() ||
!JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample() ||
_thread->heap_sampler().sampling_collector_present(),
"Sampling collector not present.");
if (JvmtiExport::should_post_sampled_object_alloc()) {
// If we want to be sampling, protect the allocated object with a Handle
// before doing the callback. The callback is done in the destructor of
// the JvmtiSampledObjectAllocEventCollector.
PreserveObj obj_h(_thread, _obj_ptr);
JvmtiSampledObjectAllocEventCollector collector;
size_t size_in_bytes = _allocator._word_size * HeapWordSize;
ThreadLocalAllocBuffer& tlab = _thread->tlab();
size_t bytes_since_last = _allocated_outside_tlab ? 0 : tlab.bytes_since_last_sample_point();
_thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
}
assert(JavaThread::current()->heap_sampler().remove_sampling_collector(), "Should never return false.");
if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
_thread->tlab().set_sample_end();
}
}
void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
// support low memory notifications (no-op if not enabled)
LowMemoryDetector::detect_low_memory_for_collected_pools();
}
void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
HeapWord* mem = (HeapWord*)obj();
size_t size_in_bytes = _allocator._word_size * HeapWordSize;
if (_allocated_outside_tlab) {
AllocTracer::send_allocation_outside_tlab(_allocator._klass, mem, size_in_bytes, _thread);
} else if (_allocated_tlab_size != 0) {
// TLAB was refilled
AllocTracer::send_allocation_in_new_tlab(_allocator._klass, mem, _allocated_tlab_size * HeapWordSize,
size_in_bytes, _thread);
}
}
void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
if (DTraceAllocProbes) {
// support for Dtrace object alloc event (no-op most of the time)
Klass* klass = _allocator._klass;
size_t word_size = _allocator._word_size;
if (klass != NULL && klass->name() != NULL) {
SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
}
}
}
void MemAllocator::Allocation::notify_allocation() {
notify_allocation_low_memory_detector();
notify_allocation_jfr_sampler();
notify_allocation_dtrace_sampler();
notify_allocation_jvmti_sampler();
}
HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
allocation._allocated_outside_tlab = true;
HeapWord* mem = _heap->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
if (mem == NULL) {
return mem;
}
NOT_PRODUCT(_heap->check_for_non_bad_heap_word_value(mem, _word_size));
size_t size_in_bytes = _word_size * HeapWordSize;
_thread->incr_allocated_bytes(size_in_bytes);
return mem;
}
HeapWord* MemAllocator::allocate_inside_tlab(Allocation& allocation) const {
assert(UseTLAB, "should use UseTLAB");
// Try allocating from an existing TLAB.
HeapWord* mem = _thread->tlab().allocate(_word_size);
if (mem != NULL) {
return mem;
}
// Try refilling the TLAB and allocating the object in it.
return allocate_inside_tlab_slow(allocation);
}
HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const {
HeapWord* mem = NULL;
ThreadLocalAllocBuffer& tlab = _thread->tlab();
if (ThreadHeapSampler::enabled()) {
// Try to allocate the sampled object from TLAB, it is possible a sample
// point was put and the TLAB still has space.
tlab.set_back_allocation_end();
mem = tlab.allocate(_word_size);
if (mem != NULL) {
allocation._tlab_end_reset_for_sample = true;
return mem;
}
}
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
if (tlab.free() > tlab.refill_waste_limit()) {
tlab.record_slow_allocation(_word_size);
return NULL;
}
// Discard tlab and allocate a new one.
// To minimize fragmentation, the last TLAB may be smaller than the rest.
size_t new_tlab_size = tlab.compute_size(_word_size);
tlab.clear_before_allocation();
if (new_tlab_size == 0) {
return NULL;
}
// Allocate a new TLAB requesting new_tlab_size. Any size
// between minimal and new_tlab_size is accepted.
size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
mem = _heap->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
if (mem == NULL) {
assert(allocation._allocated_tlab_size == 0,
"Allocation failed, but actual size was updated. min: " SIZE_FORMAT
", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
return NULL;
}
assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
p2i(mem), min_tlab_size, new_tlab_size);
if (ZeroTLAB) {
// ..and clear it.
Copy::zero_to_words(mem, allocation._allocated_tlab_size);
} else {
// ...and zap just allocated object.
#ifdef ASSERT
// Skip mangling the space corresponding to the object header to
// ensure that the returned space is not considered parsable by
// any concurrent GC thread.
size_t hdr_size = oopDesc::header_size();
Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
}
tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size);
return mem;
}
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
if (UseTLAB) {
HeapWord* result = allocate_inside_tlab(allocation);
if (result != NULL) {
return result;
}
}
return allocate_outside_tlab(allocation);
}
oop MemAllocator::allocate() const {
oop obj = NULL;
{
Allocation allocation(*this, &obj);
HeapWord* mem = mem_allocate(allocation);
if (mem != NULL) {
obj = initialize(mem);
}
}
return obj;
}
void MemAllocator::mem_clear(HeapWord* mem) const {
assert(mem != NULL, "cannot initialize NULL object");
const size_t hs = oopDesc::header_size();
assert(_word_size >= hs, "unexpected object size");
oopDesc::set_klass_gap(mem, 0);
Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}
oop MemAllocator::finish(HeapWord* mem) const {
assert(mem != NULL, "NULL object pointer");
if (UseBiasedLocking) {
oopDesc::set_mark_raw(mem, _klass->prototype_header());
} else {
// May be bootstrapping
oopDesc::set_mark_raw(mem, markOopDesc::prototype());
}
// Need a release store to ensure array/class length, mark word, and
// object zeroing are visible before setting the klass non-NULL, for
// concurrent collectors.
oopDesc::release_set_klass(mem, _klass);
return oop(mem);
}
oop ObjAllocator::initialize(HeapWord* mem) const {
mem_clear(mem);
return finish(mem);
}
MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
if (_do_zero) {
return MemAllocator::obj_memory_range(obj);
}
ArrayKlass* array_klass = ArrayKlass::cast(_klass);
const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
return MemRegion(((HeapWord*)obj) + hs, _word_size - hs);
}
oop ObjArrayAllocator::initialize(HeapWord* mem) const {
// Set array length before setting the _klass field because a
// non-NULL klass field indicates that the object is parsable by
// concurrent GC.
assert(_length >= 0, "length should be non-negative");
if (_do_zero) {
mem_clear(mem);
}
arrayOopDesc::set_length(mem, _length);
return finish(mem);
}
oop ClassAllocator::initialize(HeapWord* mem) const {
// Set oop_size field before setting the _klass field because a
// non-NULL _klass field indicates that the object is parsable by
// concurrent GC.
assert(_word_size > 0, "oop_size must be positive.");
mem_clear(mem);
java_lang_Class::set_oop_size(mem, (int)_word_size);
return finish(mem);
}

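Two usage modes fall out of this design, both visible elsewhere in the commit; a hedged sketch (klass, word_size, len and start are placeholders):

// Mode 1: allocate and initialize (cf. CollectedHeap::obj_allocate above).
ObjAllocator allocator(klass, word_size, THREAD);
oop obj = allocator.allocate();

// Mode 2: initialize memory the caller already reserved
// (cf. CollectedHeap::fill_with_array above, for filler objects).
ObjArrayAllocator filler(Universe::intArrayKlassObj(), word_size, (int)len, /* do_zero */ false);
filler.initialize(start); // 'start' points at already-reserved heap words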

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_MEM_ALLOCATOR_HPP
#define SHARE_GC_SHARED_MEM_ALLOCATOR_HPP
#include "gc/shared/collectedHeap.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
// These facilities are used for allocating and initializing newly allocated objects.
class MemAllocator: StackObj {
class Allocation;
protected:
CollectedHeap* const _heap;
Thread* const _thread;
Klass* const _klass;
const size_t _word_size;
private:
// Allocate from the current thread's TLAB, with broken-out slow path.
HeapWord* allocate_inside_tlab(Allocation& allocation) const;
HeapWord* allocate_inside_tlab_slow(Allocation& allocation) const;
HeapWord* allocate_outside_tlab(Allocation& allocation) const;
protected:
MemAllocator(Klass* klass, size_t word_size, Thread* thread)
: _heap(Universe::heap()),
_thread(thread),
_klass(klass),
_word_size(word_size)
{ }
// This function clears the memory of the object
void mem_clear(HeapWord* mem) const;
// This finishes constructing an oop by installing the mark word and the Klass* pointer
// last. At the point when the Klass pointer is initialized, this is a constructed object
// that must be parseable as an oop by concurrent collectors.
oop finish(HeapWord* mem) const;
// Raw memory allocation. This may or may not use TLAB allocations to satisfy the
// allocation. A GC implementation may override this function to satisfy the allocation
// in any way. But the default is to try a TLAB allocation, and otherwise perform
// mem_allocate.
virtual HeapWord* mem_allocate(Allocation& allocation) const;
virtual MemRegion obj_memory_range(oop obj) const {
return MemRegion((HeapWord*)obj, _word_size);
}
public:
oop allocate() const;
virtual oop initialize(HeapWord* mem) const = 0;
};
class ObjAllocator: public MemAllocator {
public:
ObjAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
: MemAllocator(klass, word_size, thread) {}
virtual oop initialize(HeapWord* mem) const;
};
class ObjArrayAllocator: public MemAllocator {
const int _length;
const bool _do_zero;
protected:
virtual MemRegion obj_memory_range(oop obj) const;
public:
ObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero,
Thread* thread = Thread::current())
: MemAllocator(klass, word_size, thread),
_length(length),
_do_zero(do_zero) {}
virtual oop initialize(HeapWord* mem) const;
};
class ClassAllocator: public MemAllocator {
public:
ClassAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
: MemAllocator(klass, word_size, thread) {}
virtual oop initialize(HeapWord* mem) const;
};
#endif // SHARE_GC_SHARED_MEM_ALLOCATOR_HPP

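A hedged extension sketch (hypothetical class, not part of this commit): a collector can specialize object initialization by subclassing and overriding initialize(), reusing the protected mem_clear()/finish() helpers:

class MyGCObjAllocator : public ObjAllocator { // hypothetical name
public:
  MyGCObjAllocator(Klass* klass, size_t word_size, Thread* thread = Thread::current())
    : ObjAllocator(klass, word_size, thread) {}
  virtual oop initialize(HeapWord* mem) const {
    mem_clear(mem);     // zero the payload and set the klass gap
    // collector-specific setup could run here, before the Klass* is published
    return finish(mem); // mark word, then release-store of the Klass*
  }
};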

@@ -185,10 +185,6 @@ void ThreadLocalAllocBuffer::fill(HeapWord* start,
initialize(start, top, start + new_size - alignment_reserve());
if (ThreadHeapSampler::enabled()) {
set_sample_end();
}
// Reset amount of internal fragmentation
set_refill_waste_limit(initial_refill_waste_limit());
}
@@ -325,14 +321,14 @@ void ThreadLocalAllocBuffer::verify() {
void ThreadLocalAllocBuffer::set_sample_end() {
size_t heap_words_remaining = pointer_delta(_end, _top);
size_t bytes_until_sample = myThread()->heap_sampler().bytes_until_sample();
size_t words_until_sample = bytes_until_sample / HeapWordSize;;
size_t words_until_sample = bytes_until_sample / HeapWordSize;
if (heap_words_remaining > words_until_sample) {
HeapWord* new_end = _top + words_until_sample;
set_end(new_end);
_bytes_since_last_sample_point = bytes_until_sample;
} else {
_bytes_since_last_sample_point = heap_words_remaining * HeapWordSize;;
_bytes_since_last_sample_point = heap_words_remaining * HeapWordSize;
}
}
@@ -346,18 +342,6 @@ void ThreadLocalAllocBuffer::set_back_allocation_end() {
_end = _allocation_end;
}
HeapWord* ThreadLocalAllocBuffer::allocate_sampled_object(size_t size) {
set_back_allocation_end();
HeapWord* result = allocate(size);
if (result) {
myThread()->heap_sampler().check_for_sampling(result, size * HeapWordSize, _bytes_since_last_sample_point);
set_sample_end();
}
return result;
}
HeapWord* ThreadLocalAllocBuffer::hard_end() {
return _allocation_end + alignment_reserve();
}

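A hedged worked example of the set_sample_end() arithmetic above (values invented for illustration):

// With pointer_delta(_end, _top) == 1024 words, bytes_until_sample == 4096
// and HeapWordSize == 8: words_until_sample == 4096 / 8 == 512 words.
// Since 1024 > 512, _end is pulled in to _top + 512 and
// _bytes_since_last_sample_point becomes 4096.
// With only 256 words remaining instead, _end would stay untouched and
// _bytes_since_last_sample_point would become 256 * 8 == 2048.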

@@ -139,7 +139,6 @@ public:
// Allocate size HeapWords. The memory is NOT initialized to zero.
inline HeapWord* allocate(size_t size);
HeapWord* allocate_sampled_object(size_t size);
// Reserve space at the end of TLAB
static size_t end_reserve() {


@@ -142,8 +142,8 @@ objArrayOop ArrayKlass::allocate_arrayArray(int n, int length, TRAPS) {
int size = objArrayOopDesc::object_size(length);
Klass* k = array_klass(n+dimension(), CHECK_0);
ArrayKlass* ak = ArrayKlass::cast(k);
objArrayOop o =
(objArrayOop)CollectedHeap::array_allocate(ak, size, length, CHECK_0);
objArrayOop o = (objArrayOop)Universe::heap()->array_allocate(ak, size, length,
/* do_zero */ true, CHECK_0);
// initialization to NULL not necessary, area already cleared
return o;
}


@@ -109,7 +109,10 @@ class arrayOopDesc : public oopDesc {
return *(int*)(((intptr_t)this) + length_offset_in_bytes());
}
void set_length(int length) {
*(int*)(((intptr_t)this) + length_offset_in_bytes()) = length;
set_length((HeapWord*)this, length);
}
static void set_length(HeapWord* mem, int length) {
*(int*)(((char*)mem) + length_offset_in_bytes()) = length;
}
// Should only be called with constants as argument


@@ -1184,8 +1184,8 @@ objArrayOop InstanceKlass::allocate_objArray(int n, int length, TRAPS) {
}
int size = objArrayOopDesc::object_size(length);
Klass* ak = array_klass(n, CHECK_NULL);
objArrayOop o =
(objArrayOop)CollectedHeap::array_allocate(ak, size, length, CHECK_NULL);
objArrayOop o = (objArrayOop)Universe::heap()->array_allocate(ak, size, length,
/* do_zero */ true, CHECK_NULL);
return o;
}
@@ -1210,7 +1210,7 @@ instanceOop InstanceKlass::allocate_instance(TRAPS) {
instanceOop i;
i = (instanceOop)CollectedHeap::obj_allocate(this, size, CHECK_NULL);
i = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
if (has_finalizer_flag && !RegisterFinalizersAtInit) {
i = register_finalizer(i, CHECK_NULL);
}


@@ -52,7 +52,7 @@ instanceOop InstanceMirrorKlass::allocate_instance(Klass* k, TRAPS) {
// Since mirrors can be variable sized because of the static fields, store
// the size in the mirror itself.
return (instanceOop)CollectedHeap::class_allocate(this, size, CHECK_NULL);
return (instanceOop)Universe::heap()->class_allocate(this, size, CHECK_NULL);
}
int InstanceMirrorKlass::oop_size(oop obj) const {


@@ -173,7 +173,8 @@ objArrayOop ObjArrayKlass::allocate(int length, TRAPS) {
if (length >= 0) {
if (length <= arrayOopDesc::max_array_length(T_OBJECT)) {
int size = objArrayOopDesc::object_size(length);
return (objArrayOop)CollectedHeap::array_allocate(this, size, length, THREAD);
return (objArrayOop)Universe::heap()->array_allocate(this, size, length,
/* do_zero */ true, THREAD);
} else {
report_java_out_of_memory("Requested array size exceeds VM limit");
JvmtiExport::post_array_size_exhausted();


@@ -69,6 +69,7 @@ class oopDesc {
inline void set_mark(volatile markOop m);
inline void set_mark_raw(volatile markOop m);
static inline void set_mark_raw(HeapWord* mem, markOop m);
inline void release_set_mark(markOop m);
inline markOop cas_set_mark(markOop new_mark, markOop old_mark);
@@ -82,15 +83,18 @@
inline Klass* klass() const;
inline Klass* klass_or_null() const volatile;
inline Klass* klass_or_null_acquire() const volatile;
static inline Klass** klass_addr(HeapWord* mem);
static inline narrowKlass* compressed_klass_addr(HeapWord* mem);
inline Klass** klass_addr();
inline narrowKlass* compressed_klass_addr();
inline void set_klass(Klass* k);
inline void release_set_klass(Klass* k);
static inline void release_set_klass(HeapWord* mem, Klass* klass);
// For klass field compression
inline int klass_gap() const;
inline void set_klass_gap(int z);
static inline void set_klass_gap(HeapWord* mem, int z);
// For when the klass pointer is being used as a linked list "next" field.
inline void set_klass_to_list_ptr(oop k);
inline oop list_ptr_from_klass();

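Taken together, these static raw-memory writers let an allocator build an object in a safe publication order; a hedged ordering sketch assembled from mem_clear(), ObjArrayAllocator::initialize() and finish() in this commit (mem, length and klass are placeholders):

oopDesc::set_klass_gap(mem, 0);                       // during mem_clear(), with zeroing
arrayOopDesc::set_length(mem, length);                // array length, before klass is visible
oopDesc::set_mark_raw(mem, markOopDesc::prototype()); // finish(): mark word
oopDesc::release_set_klass(mem, klass);               // publish: object now parsable as an oop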

@@ -63,6 +63,10 @@ void oopDesc::set_mark_raw(volatile markOop m) {
_mark = m;
}
void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
*(markOop*)(((char*)mem) + mark_offset_in_bytes()) = m;
}
void oopDesc::release_set_mark(markOop m) {
HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
@@ -110,16 +114,26 @@ Klass* oopDesc::klass_or_null_acquire() const volatile {
}
}
Klass** oopDesc::klass_addr() {
Klass** oopDesc::klass_addr(HeapWord* mem) {
// Only used internally and with CMS and will not work with
// UseCompressedOops
assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
return (Klass**) &_metadata._klass;
ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
return (Klass**) (((char*)mem) + in_bytes(offset));
}
narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
assert(UseCompressedClassPointers, "only called by compressed klass pointers");
ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
return (narrowKlass*) (((char*)mem) + in_bytes(offset));
}
Klass** oopDesc::klass_addr() {
return klass_addr((HeapWord*)this);
}
narrowKlass* oopDesc::compressed_klass_addr() {
assert(UseCompressedClassPointers, "only called by compressed klass pointers");
return &_metadata._compressed_klass;
return compressed_klass_addr((HeapWord*)this);
}
#define CHECK_SET_KLASS(k) \
@@ -137,13 +151,13 @@ void oopDesc::set_klass(Klass* k) {
}
}
void oopDesc::release_set_klass(Klass* k) {
CHECK_SET_KLASS(k);
void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
CHECK_SET_KLASS(klass);
if (UseCompressedClassPointers) {
OrderAccess::release_store(compressed_klass_addr(),
Klass::encode_klass_not_null(k));
OrderAccess::release_store(compressed_klass_addr(mem),
Klass::encode_klass_not_null(klass));
} else {
OrderAccess::release_store(klass_addr(), k);
OrderAccess::release_store(klass_addr(mem), klass);
}
}
@@ -153,12 +167,16 @@ int oopDesc::klass_gap() const {
return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
void oopDesc::set_klass_gap(int v) {
void oopDesc::set_klass_gap(HeapWord* mem, int v) {
if (UseCompressedClassPointers) {
*(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
*(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
}
}
void oopDesc::set_klass_gap(int v) {
set_klass_gap((HeapWord*)this, v);
}
void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.


@@ -102,14 +102,8 @@ typeArrayOop TypeArrayKlass::allocate_common(int length, bool do_zero, TRAPS) {
if (length >= 0) {
if (length <= max_length()) {
size_t size = typeArrayOopDesc::object_size(layout_helper(), length);
typeArrayOop t;
CollectedHeap* ch = Universe::heap();
if (do_zero) {
t = (typeArrayOop)CollectedHeap::array_allocate(this, (int)size, length, CHECK_NULL);
} else {
t = (typeArrayOop)CollectedHeap::array_allocate_nozero(this, (int)size, length, CHECK_NULL);
}
return t;
return (typeArrayOop)Universe::heap()->array_allocate(this, (int)size, length,
do_zero, CHECK_NULL);
} else {
report_java_out_of_memory("Requested array size exceeds VM limit");
JvmtiExport::post_array_size_exhausted();


@@ -38,6 +38,7 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/referenceType.hpp"
#include "memory/resourceArea.hpp"
@@ -661,9 +662,10 @@ JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle))
oop new_obj_oop = NULL;
if (obj->is_array()) {
const int length = ((arrayOop)obj())->length();
new_obj_oop = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL);
new_obj_oop = Universe::heap()->array_allocate(klass, size, length,
/* do_zero */ true, CHECK_NULL);
} else {
new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
new_obj_oop = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
}
HeapAccess<>::clone(obj(), new_obj_oop, size);


@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"


@@ -116,8 +116,7 @@ void ThreadHeapSampler::pick_next_sample(size_t overflowed_bytes) {
}
}
void ThreadHeapSampler::check_for_sampling(HeapWord* ptr, size_t allocation_size, size_t bytes_since_allocation) {
oopDesc* oop = reinterpret_cast<oopDesc*>(ptr);
void ThreadHeapSampler::check_for_sampling(oop obj, size_t allocation_size, size_t bytes_since_allocation) {
size_t total_allocated_bytes = bytes_since_allocation + allocation_size;
// If not yet time for a sample, skip it.
@@ -126,7 +125,7 @@ void ThreadHeapSampler::check_for_sampling(HeapWord* ptr, size_t allocation_size
return;
}
JvmtiExport::sampled_object_alloc_event_collector(oop);
JvmtiExport::sampled_object_alloc_event_collector(obj);
size_t overflow_bytes = total_allocated_bytes - _bytes_until_sample;
pick_next_sample(overflow_bytes);


@@ -57,7 +57,7 @@ class ThreadHeapSampler {
size_t bytes_until_sample() { return _bytes_until_sample; }
void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; }
void check_for_sampling(HeapWord* obj, size_t size_in_bytes, size_t bytes_allocated_before = 0);
void check_for_sampling(oop obj, size_t size_in_bytes, size_t bytes_allocated_before);
static int enabled();
static void enable();