8324492: Remove Atomic support for OopHandle

Reviewed-by: aboldtch, coleenp
This commit is contained in:
Kim Barrett 2024-01-25 18:35:20 +00:00
parent 12b89cd2ee
commit 39b756a0d1
5 changed files with 81 additions and 71 deletions

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,15 +75,4 @@ public:
oop* ptr_raw() const { return _obj; }
};
// Convert OopHandle to oop*
template<>
struct PrimitiveConversions::Translate<OopHandle> : public std::true_type {
typedef OopHandle Value;
typedef oop* Decayed;
static Decayed decay(Value x) { return x.ptr_raw(); }
static Value recover(Decayed x) { return OopHandle(x); }
};
#endif // SHARE_OOPS_OOPHANDLE_HPP

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,13 @@
#include "services/gcNotifier.hpp"
#include "utilities/dtrace.hpp"
MemoryManager::MemoryManager(const char* name) :
_num_pools(0), _name(name) {}
MemoryManager::MemoryManager(const char* name)
: _pools(),
_num_pools(0),
_name(name),
_memory_mgr_obj(),
_memory_mgr_obj_initialized(false)
{}
int MemoryManager::add_pool(MemoryPool* pool) {
int index = _num_pools;
@@ -56,7 +61,11 @@ int MemoryManager::add_pool(MemoryPool* pool) {
}
bool MemoryManager::is_manager(instanceHandle mh) const {
return mh() == Atomic::load(&_memory_mgr_obj).resolve();
if (Atomic::load_acquire(&_memory_mgr_obj_initialized)) {
return mh() == _memory_mgr_obj.resolve();
} else {
return false;
}
}
MemoryManager* MemoryManager::get_code_cache_memory_manager() {
@@ -68,10 +77,10 @@ MemoryManager* MemoryManager::get_metaspace_memory_manager() {
}
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Lazily create the manager object.
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_mgr_obj points to or implies.
oop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj).resolve();
if (mgr_obj == nullptr) {
if (!Atomic::load_acquire(&_memory_mgr_obj_initialized)) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra manager instances will just be gc'ed.
Klass* k = Management::sun_management_ManagementFactoryHelper_klass(CHECK_NULL);
@@ -114,34 +123,36 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
&args,
CHECK_NULL);
instanceOop m = (instanceOop) result.get_oop();
instanceHandle mgr(THREAD, m);
// Verify we didn't get a null manager. If that could happen then we'd
// need to return immediately rather than continuing on and recording the
// manager has been created.
oop m = result.get_oop();
guarantee(m != nullptr, "Manager creation returned null");
instanceHandle mgr(THREAD, (instanceOop)m);
{
// Get lock before setting _memory_mgr_obj
// since another thread may have created the instance
MutexLocker ml(THREAD, Management_lock);
// Allocate global handle outside lock, to avoid any lock nesting issues
// with the Management_lock.
OopHandle mgr_handle(Universe::vm_global(), mgr());
// Check if another thread has created the management object. We reload
// _memory_mgr_obj here because some other thread may have initialized
// it while we were executing the code before the lock.
mgr_obj = Atomic::load(&_memory_mgr_obj).resolve();
if (mgr_obj != nullptr) {
return (instanceOop)mgr_obj;
}
// Get lock since another thread may have created and installed the instance.
MutexLocker ml(THREAD, Management_lock);
// Get the address of the object we created via call_special.
mgr_obj = mgr();
// Use store barrier to make sure the memory accesses associated
// with creating the management object are visible before publishing
// its address. The unlock will publish the store to _memory_mgr_obj
// because it does a release first.
Atomic::release_store(&_memory_mgr_obj, OopHandle(Universe::vm_global(), mgr_obj));
if (Atomic::load(&_memory_mgr_obj_initialized)) {
// Some other thread won the race. Release the handle we allocated and
// use the other one. Relaxed load is sufficient because flag update is
// under the lock.
mgr_handle.release(Universe::vm_global());
} else {
// Record the object we created via call_special.
assert(_memory_mgr_obj.is_empty(), "already set manager obj");
_memory_mgr_obj = mgr_handle;
// Record manager has been created. Release matching unlocked acquire,
// to safely publish the manager object.
Atomic::release_store(&_memory_mgr_obj_initialized, true);
}
}
return (instanceOop)mgr_obj;
return (instanceOop)_memory_mgr_obj.resolve();
}
GCStatInfo::GCStatInfo(int num_pools) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,11 +57,12 @@ private:
const char* _name;
protected:
volatile OopHandle _memory_mgr_obj;
OopHandle _memory_mgr_obj;
volatile bool _memory_mgr_obj_initialized;
public:
MemoryManager(const char* name);
public:
int num_memory_pools() const { return _num_pools; }
MemoryPool* get_memory_pool(int index) {
assert(index >= 0 && index < _num_pools, "Invalid index");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,11 +63,16 @@ MemoryPool::MemoryPool(const char* name,
_gc_usage_threshold(new ThresholdSupport(support_gc_threshold, support_gc_threshold)),
_usage_sensor(),
_gc_usage_sensor(),
_memory_pool_obj()
_memory_pool_obj(),
_memory_pool_obj_initialized(false)
{}
bool MemoryPool::is_pool(instanceHandle pool) const {
return pool() == Atomic::load(&_memory_pool_obj).resolve();
if (Atomic::load_acquire(&_memory_pool_obj_initialized)) {
return pool() == _memory_pool_obj.resolve();
} else {
return false;
}
}
void MemoryPool::add_manager(MemoryManager* mgr) {
@@ -83,10 +88,10 @@ void MemoryPool::add_manager(MemoryManager* mgr) {
// It creates a MemoryPool instance when the first time
// this function is called.
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Lazily create the pool object.
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_pool_obj points to or implies.
oop pool_obj = Atomic::load_acquire(&_memory_pool_obj).resolve();
if (pool_obj == nullptr) {
if (!Atomic::load_acquire(&_memory_pool_obj_initialized)) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra pool instances will just be gc'ed.
InstanceKlass* ik = Management::sun_management_ManagementFactoryHelper_klass(CHECK_NULL);
@@ -113,33 +118,36 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
&args,
CHECK_NULL);
instanceOop p = (instanceOop) result.get_oop();
instanceHandle pool(THREAD, p);
// Verify we didn't get a null pool. If that could happen then we'd
// need to return immediately rather than continuing on and recording the
// pool has been created.
oop p = result.get_oop();
guarantee(p != nullptr, "Pool creation returns null");
instanceHandle pool(THREAD, (instanceOop)p);
{
// Get lock since another thread may have create the instance
MutexLocker ml(THREAD, Management_lock);
// Allocate global handle outside lock, to avoid any lock nesting issues
// with the Management_lock.
OopHandle pool_handle(Universe::vm_global(), pool());
// Check if another thread has created the pool. We reload
// _memory_pool_obj here because some other thread may have
// initialized it while we were executing the code before the lock.
pool_obj = Atomic::load(&_memory_pool_obj).resolve();
if (pool_obj != nullptr) {
return (instanceOop)pool_obj;
}
// Get lock since another thread may have created and installed the instance.
MutexLocker ml(THREAD, Management_lock);
// Get the address of the object we created via call_special.
pool_obj = pool();
// Use store barrier to make sure the memory accesses associated
// with creating the pool are visible before publishing its address.
// The unlock will publish the store to _memory_pool_obj because
// it does a release first.
Atomic::release_store(&_memory_pool_obj, OopHandle(Universe::vm_global(), pool_obj));
if (Atomic::load(&_memory_pool_obj_initialized)) {
// Some other thread won the race. Release the handle we allocated and
// use the other one. Relaxed load is sufficient because flag update is
// under the lock.
pool_handle.release(Universe::vm_global());
} else {
// Record the object we created via call_special.
assert(_memory_pool_obj.is_empty(), "already set pool obj");
_memory_pool_obj = pool_handle;
// Record pool has been created. Release matching unlocked acquire, to
// safely publish the pool object.
Atomic::release_store(&_memory_pool_obj_initialized, true);
}
}
return (instanceOop)pool_obj;
return (instanceOop)_memory_pool_obj.resolve();
}
inline static size_t get_max_value(size_t val1, size_t val2) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,8 @@ class MemoryPool : public CHeapObj<mtInternal> {
SensorInfo* _usage_sensor;
SensorInfo* _gc_usage_sensor;
volatile OopHandle _memory_pool_obj;
OopHandle _memory_pool_obj;
volatile bool _memory_pool_obj_initialized;
void add_manager(MemoryManager* mgr);