/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"

// Explicit C-heap memory management

void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

#ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // Sparc and X86 have atomic jlong (8 bytes) instructions
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // possible word-tearing during load/store
  *dest += add_value;
#endif
}
#endif
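
// The helper above keeps non-PRODUCT statistics counters. A minimal usage
// sketch (illustrative only; `_bytes_allocated` and `record_allocation` are
// hypothetical names, not part of this header):
//
//   static volatile julong _bytes_allocated = 0;
//
//   void record_allocation(size_t size) {
//     NOT_PRODUCT(inc_stat_counter(&_bytes_allocated, size));
//   }
//
// Even on SPARC/X86 the 64-bit load/add/store is not a single atomic
// read-modify-write, so concurrent increments may be lost; the counter is
// statistical only.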

// allocate using malloc; by default this exits the VM if no memory is
// available, unless AllocFailStrategy::RETURN_NULL is requested
inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  if (pc == 0) {
    pc = CURRENT_PC;
  }
  char* p = (char*) os::malloc(size, flags, pc);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
#endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
#endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
#endif
  os::free(p, memflags);
}
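
// Usage sketch for the helpers above (illustrative only; the buffer names and
// sizes are made up). With the default EXIT_OOM mode a failed allocation calls
// vm_exit_out_of_memory(), so only the RETURN_NULL variant needs a NULL check:
//
//   char* buf = AllocateHeap(1024, mtInternal);                // exits the VM on OOM
//   char* opt = AllocateHeap(1024, mtInternal, 0,
//                            AllocFailStrategy::RETURN_NULL);  // may return NULL
//   if (opt != NULL) {
//     FreeHeap(opt, mtInternal);
//   }
//   buf = ReallocateHeap(buf, 2048, mtInternal);               // exits the VM on OOM
//   FreeHeap(buf, mtInternal);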

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
      address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
      const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, caller_pc);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p) {
  FreeHeap(p, F);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p) {
  FreeHeap(p, F);
}
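
// The operators above let classes derived from CHeapObj<F> be allocated on
// the C heap with plain `new`, tagged with the MEMFLAGS category F for memory
// tracking. A minimal sketch (illustrative only; `Foo` is a hypothetical
// class, and the plain `new Foo()` form assumes the caller_pc default
// declared in allocation.hpp):
//
//   class Foo : public CHeapObj<mtInternal> {
//    public:
//     int _value;
//   };
//
//   Foo* f = new Foo();                  // exits the VM on OOM
//   Foo* g = new (std::nothrow) Foo();   // returns NULL on OOM
//   delete f;
//   delete g;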

template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");

  _size = sizeof(E) * length;
  _use_malloc = _size < ArrayAllocatorMallocLimit;

  if (_use_malloc) {
    _addr = AllocateHeap(_size, F);
    if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
      // malloc failed; let's try with mmap instead
      _use_malloc = false;
    } else {
      return (E*)_addr;
    }
  }

  int alignment = os::vm_allocation_granularity();
  _size = align_size_up(_size, alignment);

  _addr = os::reserve_memory(_size, NULL, alignment, F);
  if (_addr == NULL) {
    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");

  return (E*)_addr;
}

template <class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
  if (_addr != NULL) {
    if (_use_malloc) {
      FreeHeap(_addr, F);
    } else {
      os::release_memory(_addr, _size);
    }
    _addr = NULL;
  }
}
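
// ArrayAllocator chooses malloc for requests below ArrayAllocatorMallocLimit
// and falls back to reserved/committed virtual memory for larger ones, then
// releases through the matching mechanism in free(). A minimal usage sketch
// (illustrative only; the element type and length are made up):
//
//   ArrayAllocator<jint, mtInternal> allocator;
//   jint* elems = allocator.allocate(100000);  // malloc or virtual memory, by size
//   // ... use elems ...
//   allocator.free();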

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP