8297766: Remove UseMallocOnly development option

Reviewed-by: coleenp, stuefe, dholmes
Author: Afshin Zafari
Date: 2022-12-05 13:31:15 +00:00
Committed by: Thomas Stuefe
Parent: b9eec96889
Commit: bd381886e0
9 changed files with 23 additions and 219 deletions


@@ -273,10 +273,6 @@ Arena::~Arena() {
// Destroy this arenas contents and reset to empty
void Arena::destruct_contents() {
if (UseMallocOnly && _first != NULL) {
char* end = _first->next() ? _first->top() : _hwm;
free_malloced_objects(_first, _first->bottom(), end, _hwm);
}
// reset size before chop to avoid a rare racing condition
// that can have total arena memory exceed total chunk memory
set_size_in_bytes(0);
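
With the UseMallocOnly branch gone, destruct_contents() no longer frees individually malloc'ed objects whose addresses were stashed in the chunks; it only resets the accounted size and chops the chunk list. A minimal standalone sketch of that remaining chunk-level cleanup (toy types, not the HotSpot Arena):

    #include <cstddef>
    #include <cstdlib>

    // Toy chunk-based arena: allocations are carved out of chunks, and tearing
    // the arena down frees whole chunks -- there are no per-object frees left.
    struct Chunk {
        Chunk* next;
        // payload bytes would follow the header in a real chunk
    };

    struct ToyArena {
        Chunk* first = nullptr;        // head of the chunk list
        size_t size_in_bytes = 0;      // accounted arena size

        Chunk* grow() {                // chunks themselves come from malloc
            Chunk* c = (Chunk*)std::calloc(1, sizeof(Chunk));
            c->next = first;
            first = c;
            return c;
        }

        void destruct_contents() {
            size_in_bytes = 0;         // reset the size before chopping, as the comment above notes
            for (Chunk* c = first; c != nullptr; ) {
                Chunk* next = c->next;
                std::free(c);
                c = next;
            }
            first = nullptr;
        }
    };
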
@@ -342,19 +338,6 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
assert(old_size == 0, "sanity");
return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
}
#ifdef ASSERT
if (UseMallocOnly) {
// always allocate a new object (otherwise we'll free this one twice)
char* copy = (char*)Amalloc(new_size, alloc_failmode);
if (copy == NULL) {
return NULL;
}
size_t n = MIN2(old_size, new_size);
if (n > 0) memcpy(copy, old_ptr, n);
Afree(old_ptr,old_size); // Mostly done to keep stats accurate
return copy;
}
#endif
char *c_old = (char*)old_ptr; // Handy name
// Stupid fast special case
if( new_size <= old_size ) { // Shrink in-place
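
The deleted ASSERT block forced Arealloc() under UseMallocOnly to always take the slow path: allocate a fresh block, copy MIN2(old_size, new_size) bytes, and Afree() the old block to keep the statistics honest. The surviving code keeps the fast paths: shrink in place, or grow in place when the old block sits at the top of the arena. A rough standalone illustration of that in-place-versus-copy distinction (toy code under simplifying assumptions, not the HotSpot implementation):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Toy bump allocator over one malloc'ed buffer, illustrating the
    // "resize in place when possible, otherwise allocate and copy" pattern.
    struct ToyBump {
        char* buf;
        char* hwm;    // high-water mark: next free byte
        char* max;    // end of the buffer

        explicit ToyBump(size_t cap)
            : buf((char*)std::malloc(cap)), hwm(buf), max(buf + cap) {}
        ~ToyBump() { std::free(buf); }

        void* alloc(size_t n) {
            if (hwm + n > max) return nullptr;
            void* p = hwm;
            hwm += n;
            return p;
        }

        void* realloc(void* old_p, size_t old_size, size_t new_size) {
            char* c_old = (char*)old_p;
            if (new_size <= old_size) {
                return old_p;                   // shrink: the block simply keeps its place
            }
            if (c_old + old_size == hwm && c_old + new_size <= max) {
                hwm = c_old + new_size;         // grow in place: old block is on top
                return old_p;
            }
            void* copy = alloc(new_size);       // otherwise allocate a new block and copy
            if (copy != nullptr) {
                std::memcpy(copy, old_p, old_size);
            }
            return copy;
        }
    };
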
@@ -386,24 +369,6 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
if (UseMallocOnly) {
// really slow, but not easy to make fast
if (_chunk == NULL) return false;
char** bottom = (char**)_chunk->bottom();
for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
if (*p == ptr) return true;
}
for (Chunk *c = _first; c != NULL; c = c->next()) {
if (c == _chunk) continue; // current chunk has been processed
char** bottom = (char**)c->bottom();
for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
if (*p == ptr) return true;
}
}
return false;
}
#endif
if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
return true; // Check for in this chunk
for (Chunk *c = _first; c; c = c->next()) {
@@ -414,51 +379,3 @@ bool Arena::contains( const void *ptr ) const {
}
return false; // Not in any Chunk, so not in Arena
}
#ifdef ASSERT
void* Arena::malloc(size_t size) {
assert(UseMallocOnly, "shouldn't call");
// use malloc, but save pointer in res. area for later freeing
char** save = (char**)internal_amalloc(sizeof(char*));
return (*save = (char*)os::malloc(size, mtChunk));
}
#endif
//--------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT
// debugging code
inline void Arena::free_all(char** start, char** end) {
for (char** p = start; p < end; p++) if (*p) os::free(*p);
}
void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
assert(UseMallocOnly, "should not call");
// free all objects malloced since resource mark was created; resource area
// contains their addresses
if (chunk->next()) {
// this chunk is full, and some others too
for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
char* top = c->top();
if (c->next() == NULL) {
top = hwm2; // last junk is only used up to hwm2
assert(c->contains(hwm2), "bad hwm2");
}
free_all((char**)c->bottom(), (char**)top);
}
assert(chunk->contains(hwm), "bad hwm");
assert(chunk->contains(max), "bad max");
free_all((char**)hwm, (char**)max);
} else {
// this chunk was partially used
assert(chunk->contains(hwm), "bad hwm");
assert(chunk->contains(hwm2), "bad hwm2");
free_all((char**)hwm, (char**)hwm2);
}
}
#endif // Non-product
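
This is the heart of what the flag removed: in debug builds every arena request was forwarded to os::malloc, and the returned pointer was itself stored in the arena, so free_malloced_objects() could later walk the stored pointers between a resource mark and the current high-water mark and free each one. A compressed standalone sketch of that bookkeeping, using a plain vector in place of the chunk list (toy code, not HotSpot):

    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    // Toy "malloc-only" arena: every allocation is a separate malloc, and the
    // arena only records the returned pointers so they can be freed when
    // rolling back to a mark or destroying the arena.
    struct MallocOnlyArena {
        std::vector<void*> ptrs;       // stands in for the pointers stored in chunks

        void* alloc(size_t n) {
            void* p = std::malloc(n);
            ptrs.push_back(p);         // remember it for later freeing
            return p;
        }

        size_t mark() const { return ptrs.size(); }

        void rollback_to(size_t m) {   // free everything allocated since the mark
            for (size_t i = m; i < ptrs.size(); i++) {
                std::free(ptrs[i]);
            }
            ptrs.resize(m);
        }

        ~MallocOnlyArena() { rollback_to(0); }
    };
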


@@ -125,7 +125,6 @@ protected:
// on both 32 and 64 bit platforms. Required for atomic jlong operations on 32 bits.
void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
x = ARENA_ALIGN(x); // note for 32 bits this should align _hwm as well.
debug_only(if (UseMallocOnly) return malloc(x);)
// Amalloc guarantees 64-bit alignment and we need to ensure that in case the preceding
// allocation was AmallocWords. Only needed on 32-bit - on 64-bit Amalloc and AmallocWords are
// identical.
@@ -138,7 +137,6 @@ protected:
// is 4 bytes on 32 bits, hence the name.
void* AmallocWords(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
assert(is_aligned(x, BytesPerWord), "misaligned size");
debug_only(if (UseMallocOnly) return malloc(x);)
return internal_amalloc(x, alloc_failmode);
}
@@ -149,7 +147,6 @@ protected:
}
#ifdef ASSERT
if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
if (UseMallocOnly) return true;
#endif
if (((char*)ptr) + size == _hwm) {
_hwm = (char*)ptr;
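
What Afree() keeps from its debug block is only the optional zapping; reclamation itself remains the plain bump-pointer rule visible in the surviving lines: space is handed back only when the freed block is the most recent allocation, by rolling the high-water mark back over it. A small standalone sketch of that rule (toy buffer and marker value are assumptions, not HotSpot's):

    #include <cstddef>
    #include <cstring>

    // Toy free-at-top: an arena can only reclaim its most recent allocation,
    // by rolling the high-water mark back over it; interior blocks stay put.
    struct ToyTop {
        char  buf[1024];
        char* hwm = buf;

        void* alloc(size_t n) {
            if (hwm + n > buf + sizeof(buf)) return nullptr;
            void* p = hwm;
            hwm += n;
            return p;
        }

        bool free_at_top(void* ptr, size_t size) {
            std::memset(ptr, 0xAB, size);      // zap freed bytes, in the spirit of ZapResourceArea
            if ((char*)ptr + size == hwm) {    // only the topmost block can be reclaimed
                hwm = (char*)ptr;
                return true;
            }
            return false;
        }
    };
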


@@ -105,10 +105,6 @@ public:
assert(_nesting > state._nesting, "rollback to inactive mark");
assert((_nesting - state._nesting) == 1, "rollback across another mark");
if (UseMallocOnly) {
free_malloced_objects(state._chunk, state._hwm, state._max, _hwm);
}
if (state._chunk->next() != nullptr) { // Delete later chunks.
// Reset size before deleting chunks. Otherwise, the total
// size could exceed the total chunk size.
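
rollback_to() implements the resource-mark contract: a mark snapshots the current chunk and high-water mark, and rolling back deletes any chunks grown since the mark and restores the saved position; the removed lines additionally had to free the per-object mallocs when UseMallocOnly was set. A minimal standalone sketch of chunk-level mark/rollback (hypothetical toy names, not the HotSpot ResourceMark):

    #include <cstddef>

    // Toy chunked arena with mark/rollback: a mark records the chunk that was
    // current and how much of it was used; rollback deletes the chunks grown
    // after the mark and restores that usage count.
    struct Chunk {
        Chunk* next = nullptr;
        size_t used = 0;
        char   data[4096];
    };

    struct MarkedArena {
        Chunk* first = new Chunk();
        Chunk* cur   = first;

        struct Mark { Chunk* chunk; size_t used; };

        Mark mark() const { return { cur, cur->used }; }

        void* alloc(size_t n) {                // toy: assumes n fits in one chunk
            if (cur->used + n > sizeof(cur->data)) {
                cur->next = new Chunk();       // grow by appending a chunk
                cur = cur->next;
            }
            void* p = cur->data + cur->used;
            cur->used += n;
            return p;
        }

        void rollback_to(const Mark& m) {
            for (Chunk* c = m.chunk->next; c != nullptr; ) {
                Chunk* next = c->next;         // delete chunks grown since the mark
                delete c;
                c = next;
            }
            m.chunk->next = nullptr;
            cur = m.chunk;
            cur->used = m.used;                // restore the saved high-water mark
        }

        ~MarkedArena() {
            rollback_to({ first, 0 });
            delete first;
        }
    };
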


@@ -32,11 +32,6 @@
inline char* ResourceArea::allocate_bytes(size_t size, AllocFailType alloc_failmode) {
#ifdef ASSERT
verify_has_resource_mark();
if (UseMallocOnly) {
// use malloc, but save pointer in res. area for later freeing
char** save = (char**)internal_amalloc(sizeof(char*));
return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
}
#endif // ASSERT
return (char*)Amalloc(size, alloc_failmode);
}


@@ -455,10 +455,6 @@ const int ObjectAlignmentInBytes = 8;
notproduct(bool, VerifyCodeCache, false, \
"Verify code cache on memory allocation/deallocation") \
\
develop(bool, UseMallocOnly, false, \
"Use only malloc/free for allocation (no resource area/arena). " \
"Used to help diagnose memory stomping bugs.") \
\
develop(bool, ZapResourceArea, trueInDebug, \
"Zap freed resource/arena space") \
\
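
UseMallocOnly was declared with develop(), i.e. it was settable only in debug builds and behaved as a constant false in product builds, which is why the regression test removed further down requires vm.debug. A simplified sketch of that build-dependent flag pattern (all names invented for illustration; the real machinery is HotSpot's globals macro table, and ASSERT stands for its debug-build macro):

    #include <cstddef>
    #include <cstdlib>

    // A "develop"-style flag: a real, settable variable in debug builds,
    // a compile-time constant false in product builds.
    #ifdef ASSERT
    static bool UseToyMallocOnly = false;
    #else
    static const bool UseToyMallocOnly = false;
    #endif

    static char   toy_buf[1 << 16];
    static size_t toy_used = 0;

    static void* toy_allocate(size_t n) {
        if (UseToyMallocOnly) {
            return std::malloc(n);     // per-object malloc: the diagnostic path the flag enabled
        }
        if (toy_used + n > sizeof(toy_buf)) return nullptr;
        void* p = toy_buf + toy_used;  // normal arena-style bump allocation
        toy_used += n;
        return p;
    }
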


@@ -196,7 +196,6 @@ class HandleArea: public Arena {
// Handle allocation
private:
oop* real_allocate_handle(oop obj) {
// Ignore UseMallocOnly by allocating only in arena.
oop* handle = (oop*)internal_amalloc(oopSize);
*handle = obj;
return handle;


@@ -57,17 +57,13 @@ TEST_VM(Arena, alloc_size_0) {
void* p = ar.Amalloc(0);
ASSERT_NOT_NULL(p);
ASSERT_ALIGN_AMALLOC(p);
if (!UseMallocOnly) {
// contains works differently for malloced mode (and there its broken anyway)
ASSERT_FALSE(ar.contains(p));
}
ASSERT_FALSE(ar.contains(p));
// Allocate again. The new allocations should have the same position as the 0-sized
// first one.
if (!UseMallocOnly) {
void* p2 = ar.Amalloc(1);
ASSERT_AMALLOC(ar, p2);
ASSERT_EQ(p2, p);
}
void* p2 = ar.Amalloc(1);
ASSERT_AMALLOC(ar, p2);
ASSERT_EQ(p2, p);
}
// Test behavior for Arealloc(p, 0)
@@ -81,10 +77,8 @@ TEST_VM(Arena, realloc_size_0) {
ASSERT_NULL(p2);
// a subsequent allocation should get the same pointer
if (!UseMallocOnly) {
void* p3 = ar.Amalloc(0x20);
ASSERT_EQ(p3, p1);
}
void* p3 = ar.Amalloc(0x20);
ASSERT_EQ(p3, p1);
}
// Realloc equal sizes is a noop
@@ -96,9 +90,7 @@ TEST_VM(Arena, realloc_same_size) {
void* p2 = ar.Arealloc(p1, 0x200, 0x200);
if (!UseMallocOnly) {
ASSERT_EQ(p2, p1);
}
ASSERT_EQ(p2, p1);
ASSERT_RANGE_IS_MARKED(p2, 0x200);
}
@@ -157,29 +149,26 @@ TEST_VM(Arena, free_top) {
DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
// a subsequent allocation should get the same pointer
if (!UseMallocOnly) {
void* p2 = ar.Amalloc(0x20);
ASSERT_EQ(p2, p);
}
void* p2 = ar.Amalloc(0x20);
ASSERT_EQ(p2, p);
}
// In-place shrinking.
TEST_VM(Arena, realloc_top_shrink) {
if (!UseMallocOnly) {
Arena ar(mtTest);
Arena ar(mtTest);
void* p1 = ar.Amalloc(0x200);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 0x200);
void* p1 = ar.Amalloc(0x200);
ASSERT_AMALLOC(ar, p1);
GtestUtils::mark_range(p1, 0x200);
void* p2 = ar.Arealloc(p1, 0x200, 0x100);
ASSERT_EQ(p1, p2);
ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content
void* p2 = ar.Arealloc(p1, 0x200, 0x100);
ASSERT_EQ(p1, p2);
ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content
// A subsequent allocation should be placed right after the end of the first, shrunk, allocation
void* p3 = ar.Amalloc(1);
ASSERT_EQ(p3, ((char*)p1) + 0x100);
}
// A subsequent allocation should be placed right after the end of the first, shrunk, allocation
void* p3 = ar.Amalloc(1);
ASSERT_EQ(p3, ((char*)p1) + 0x100);
}
// not-in-place shrinking.
@@ -193,9 +182,7 @@ TEST_VM(Arena, realloc_nontop_shrink) {
void* p_other = ar.Amalloc(20); // new top, p1 not top anymore
void* p2 = ar.Arealloc(p1, 200, 100);
if (!UseMallocOnly) {
ASSERT_EQ(p1, p2); // should still shrink in place
}
ASSERT_EQ(p1, p2); // should still shrink in place
ASSERT_RANGE_IS_MARKED(p2, 100); // realloc should preserve old content
}
@@ -208,9 +195,7 @@ TEST_VM(Arena, realloc_top_grow) {
GtestUtils::mark_range(p1, 0x10);
void* p2 = ar.Arealloc(p1, 0x10, 0x20);
if (!UseMallocOnly) {
ASSERT_EQ(p1, p2);
}
ASSERT_EQ(p1, p2);
ASSERT_RANGE_IS_MARKED(p2, 0x10); // realloc should preserve old content
}
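
With the flag gone, the arena gtests can assert the deterministic bump-pointer placement unconditionally, for example that freeing the topmost block and allocating again returns the same address. A standalone, assertion-style example of that property over a toy allocator (not the actual TEST_VM code):

    #include <cassert>
    #include <cstddef>

    // Minimal stand-in for the arena property the tests rely on: after freeing
    // the topmost allocation, the next allocation reuses the same address.
    struct TinyArena {
        char  buf[256];
        char* hwm = buf;

        void* alloc(size_t n)             { void* p = hwm; hwm += n; return p; }
        void  free_top(void* p, size_t n) { if ((char*)p + n == hwm) hwm = (char*)p; }
    };

    int main() {
        TinyArena ar;
        void* p = ar.alloc(0x10);
        ar.free_top(p, 0x10);
        void* p2 = ar.alloc(0x20);
        assert(p2 == p);               // same placement, as in TEST_VM(Arena, free_top)
        return 0;
    }
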


@@ -1,40 +0,0 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* Note: This runs the Arena portion of the gtests with UseMallocOnly
* (restricted to debug since UseMallocOnly is debug-only)
*/
/* @test
* @bug 8271242
* @summary Run arena tests with UseMallocOnly
* @requires vm.debug
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.xml
* @requires vm.flagless
* @run main/native GTestWrapper --gtest_filter=Arena* -XX:+UseMallocOnly
*/


@@ -1,41 +0,0 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8007475
* @summary Test memory stomp in stack map test
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseMallocOnly StackMapFrameTest
*/
public class StackMapFrameTest {
public static void foo() {
Object o = new Object();
}
public static void main(String args[]) {
for (int i = 0; i < 25000; i++) {
foo();
}
}
}