Merge
commit eb6c04d2f7
Changed paths under hotspot/src:
  cpu/aarch64/vm
  os/posix/vm
  os_cpu/aix_ppc/vm
  os_cpu/bsd_zero/vm
  os_cpu/linux_ppc/vm
  os_cpu/linux_s390/vm
  os_cpu/linux_sparc/vm
  os_cpu/linux_zero/vm
  share/vm
@@ -3630,6 +3630,12 @@ void MacroAssembler::store_heap_oop_null(Address dst) {
 }
 
 #if INCLUDE_ALL_GCS
+/*
+ * g1_write_barrier_pre -- G1GC pre-write barrier for store of new_val at
+ * store_addr.
+ *
+ * Allocates rscratch1
+ */
 void MacroAssembler::g1_write_barrier_pre(Register obj,
                                           Register pre_val,
                                           Register thread,
@@ -3645,10 +3651,8 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   Label done;
   Label runtime;
 
-  assert(pre_val != noreg, "check this code");
-
-  if (obj != noreg)
-    assert_different_registers(obj, pre_val, tmp);
+  assert_different_registers(obj, pre_val, tmp, rscratch1);
+  assert(pre_val != noreg && tmp != noreg, "expecting a register");
 
   Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        SATBMarkQueue::byte_offset_of_active()));
@@ -3722,12 +3726,22 @@ void MacroAssembler::g1_write_barrier_pre(Register obj,
   bind(done);
 }
 
+/*
+ * g1_write_barrier_post -- G1GC post-write barrier for store of new_val at
+ * store_addr
+ *
+ * Allocates rscratch1
+ */
 void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                            Register new_val,
                                            Register thread,
                                            Register tmp,
                                            Register tmp2) {
   assert(thread == rthread, "must be");
+  assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
+                             rscratch1);
+  assert(store_addr != noreg && new_val != noreg && tmp != noreg
+         && tmp2 != noreg, "expecting a register");
 
   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        DirtyCardQueue::byte_offset_of_index()));
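The comments added above summarize the contract of the two barrier stubs. As orientation only, the logic those AArch64 routines emit corresponds roughly to this C++-level sketch of G1's SATB pre-barrier and card-marking post-barrier; the queue types, card granularity, and region-size constant are illustrative stand-ins, not the real HotSpot classes.

    #include <cstdint>
    #include <vector>

    // Illustrative per-thread queues (stand-ins for SATBMarkQueue / DirtyCardQueue).
    struct ThreadQueues {
      bool satb_active = false;            // mirrors the in_progress flag tested by the pre-barrier
      std::vector<void*> satb_queue;       // old values recorded for snapshot-at-the-beginning marking
      std::vector<uintptr_t> dirty_cards;  // cards queued for refinement by the post-barrier
    };

    const unsigned kLogCardSize   = 9;     // 512-byte cards (assumed)
    const unsigned kLogRegionSize = 20;    // 1 MB heap regions (assumed)

    // g1_write_barrier_pre: while marking is active, remember the value being overwritten.
    void write_barrier_pre(ThreadQueues& t, void** store_addr) {
      if (!t.satb_active) return;
      void* pre_val = *store_addr;
      if (pre_val != nullptr) t.satb_queue.push_back(pre_val);
    }

    // g1_write_barrier_post: after the store, dirty the card if the new value points
    // into a different region (cross-region references feed the remembered sets).
    void write_barrier_post(ThreadQueues& t, void** store_addr, void* new_val) {
      uintptr_t from = reinterpret_cast<uintptr_t>(store_addr);
      uintptr_t to   = reinterpret_cast<uintptr_t>(new_val);
      if (new_val == nullptr || ((from ^ to) >> kLogRegionSize) == 0) return;  // null or same region
      t.dirty_cards.push_back(from >> kLogCardSize);
    }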
@@ -2067,7 +2067,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ g1_write_barrier_pre(noreg /* obj */,
                             r0 /* pre_val */,
                             rthread /* thread */,
-                            rscratch1 /* tmp */,
+                            rscratch2 /* tmp */,
                             true /* tosca_live */,
                             true /* expand_call */);
   }
@@ -170,7 +170,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
         // G1 barrier needs uncompressed oop for region cross check.
         Register new_val = val;
         if (UseCompressedOops) {
-          new_val = rscratch1;
+          new_val = rscratch2;
           __ mov(new_val, val);
         }
         __ store_heap_oop(Address(r3, 0), val);
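Both call-site changes (rscratch1 to rscratch2) follow from the "Allocates rscratch1" contract documented above: the barrier routines now clobber rscratch1 internally, so callers must not hand it in as a temporary. A tiny stand-alone illustration of the disjointness the new assert_different_registers calls enforce (hypothetical register enum, not the HotSpot macro):

    #include <cassert>
    #include <initializer_list>

    enum Register { r0, r3, rthread, rscratch1, rscratch2, noreg };

    // Rough equivalent of assert_different_registers: every named register must be distinct.
    void assert_different_registers(std::initializer_list<Register> regs) {
      for (const Register* i = regs.begin(); i != regs.end(); ++i)
        for (const Register* j = i + 1; j != regs.end(); ++j)
          assert(*i != *j && "register passed twice");
    }

    int main() {
      Register tmp = rscratch2;                                    // callers now use rscratch2 ...
      assert_different_registers({r0, rthread, tmp, rscratch1});   // ... so it cannot alias rscratch1
      return 0;
    }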
@@ -24,7 +24,6 @@
 
 #include "utilities/globalDefinitions.hpp"
 #include "prims/jvm.h"
-#include "semaphore_posix.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
@@ -32,6 +31,11 @@
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
+#ifndef __APPLE__
+// POSIX unamed semaphores are not supported on OS X.
+#include "semaphore_posix.hpp"
+#endif
+
 #include <dlfcn.h>
 #include <pthread.h>
 #include <semaphore.h>
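The new #ifndef __APPLE__ guard reflects a platform quirk: sem_init for POSIX unnamed semaphores is not implemented on macOS and fails at runtime, so the semaphore_posix wrapper is only pulled in on the other POSIX platforms. A minimal sketch of the unnamed-semaphore pattern that wrapper builds on, using plain POSIX calls rather than the HotSpot class:

    #include <semaphore.h>
    #include <cstdio>

    int main() {
      sem_t sem;
      // Unnamed, process-local semaphore with an initial count of 0.
      if (sem_init(&sem, 0 /* pshared: not shared across processes */, 0) != 0) {
        perror("sem_init");   // this is the path taken on macOS, hence the #ifndef __APPLE__ above
        return 1;
      }
      sem_post(&sem);         // signal once
      sem_wait(&sem);         // consume the signal; returns immediately here
      sem_destroy(&sem);
      return 0;
    }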
@@ -106,8 +106,8 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
@@ -129,8 +129,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
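The same two-line fix repeats in this and the following platform files: STATIC_ASSERT is HotSpot's compile-time assertion macro, and the checks merely pin the deduced template parameters to the width of the 4- or 8-byte PlatformAdd specialization. In standard C++ the idea looks roughly like this (static_assert in place of the HotSpot macro, the GCC/Clang __sync builtin as in the zero ports):

    #include <cstddef>

    template<size_t byte_size>
    struct PlatformAdd;

    template<>
    struct PlatformAdd<4> {
      template<typename I, typename D>
      D add_and_fetch(I add_value, D volatile* dest) const {
        static_assert(4 == sizeof(I), "increment must be exactly 4 bytes");
        static_assert(4 == sizeof(D), "destination must be exactly 4 bytes");
        return __sync_add_and_fetch(dest, add_value);  // returns the updated value
      }
    };

    int main() {
      volatile int counter = 0;
      int after = PlatformAdd<4>().add_and_fetch(1, &counter);  // after == 1, counter == 1
      return after == 1 ? 0 : 1;
    }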
@@ -184,8 +184,8 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
 #ifdef ARM
   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
@@ -201,8 +201,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
   return __sync_add_and_fetch(dest, add_value);
 }
@@ -283,7 +283,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(4 == sizeof(T));
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 #else
@@ -301,7 +301,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(8 == sizeof(T));
+  STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
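The PlatformCmpxchg operators touched just above keep wrapping the same GCC builtin; only the size check changes. For reference, the compare-and-swap contract they implement returns the value previously held by the destination, and the exchange happened exactly when that value equals compare_value; a condensed sketch with standard atomics (not the HotSpot templates):

    #include <atomic>
    #include <cassert>

    // Mirrors __sync_val_compare_and_swap(dest, compare_value, exchange_value):
    // returns the prior contents of *dest; the store happened iff that equals compare_value.
    int cmpxchg(int exchange_value, std::atomic<int>* dest, int compare_value) {
      int observed = compare_value;
      dest->compare_exchange_strong(observed, exchange_value);
      return observed;
    }

    int main() {
      std::atomic<int> x{5};
      assert(cmpxchg(7, &x, 5) == 5 && x.load() == 7);  // matched: exchange performed
      assert(cmpxchg(9, &x, 5) == 7 && x.load() == 7);  // mismatch: value left untouched
      return 0;
    }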
@@ -104,8 +104,8 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
@@ -127,8 +127,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
@@ -92,9 +92,9 @@ struct Atomic::PlatformAdd
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
   D old, upd;
 
@@ -143,9 +143,9 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
   D old, upd;
 
@@ -62,8 +62,8 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
   D rv;
   __asm__ volatile(
@@ -81,10 +81,11 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
   return rv;
 }
 
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
   D rv;
   __asm__ volatile(
@@ -178,8 +178,8 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(4 == sizeof(I));
-  STATIC_CAST(4 == sizeof(D));
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 
 #ifdef ARM
   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
@@ -195,8 +195,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  STATIC_CAST(8 == sizeof(I));
-  STATIC_CAST(8 == sizeof(D));
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
   return __sync_add_and_fetch(dest, add_value);
 }
@@ -75,6 +75,9 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#endif // INCLUDE_ALL_GCS
 #if INCLUDE_TRACE
 #include "trace/tracing.hpp"
 #endif
@@ -764,6 +767,25 @@ OopHandle ClassLoaderData::add_handle(Handle h) {
   return OopHandle(_handles.add(h()));
 }
 
+void ClassLoaderData::remove_handle(OopHandle h) {
+  oop* ptr = h.ptr_raw();
+  if (ptr != NULL) {
+    assert(_handles.contains(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
+#if INCLUDE_ALL_GCS
+    // This barrier is used by G1 to remember the old oop values, so
+    // that we don't forget any objects that were live at the snapshot at
+    // the beginning.
+    if (UseG1GC) {
+      oop obj = *ptr;
+      if (obj != NULL) {
+        G1SATBCardTableModRefBS::enqueue(obj);
+      }
+    }
+#endif
+    *ptr = NULL;
+  }
+}
+
 void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
   MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
   if (dest.resolve() != NULL) {
@@ -364,6 +364,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
   const char* loader_name();
 
   OopHandle add_handle(Handle h);
+  void remove_handle(OopHandle h);
   void init_handle_locked(OopHandle& pd, Handle h); // used for concurrent access to ModuleEntry::_pd field
   void add_class(Klass* k, bool publicize = true);
   void remove_class(Klass* k);
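Taken together, the two ClassLoaderData hunks give the resolved-oop storage a full lifecycle: add_handle hands out an OopHandle backed by a slot the loader owns, and the new remove_handle clears that slot, first pushing the old value through G1's SATB barrier so concurrent marking cannot lose an object that was reachable from the handle when marking started. A compressed sketch of that shape (toy types, GC notification reduced to a stub):

    #include <vector>
    #include <cstddef>

    typedef void* oop;                         // placeholder for the VM's oop type

    void satb_enqueue(oop obj) { (void)obj; }  // stand-in for G1SATBCardTableModRefBS::enqueue

    struct OopHandle {
      oop* _obj;
      oop  resolve() const { return _obj ? *_obj : nullptr; }
      oop* ptr_raw()       { return _obj; }
    };

    struct ClassLoaderDataSketch {
      std::vector<oop*> _handles;              // slots owned by the loader data

      OopHandle add_handle(oop value) {
        _handles.push_back(new oop(value));
        return OopHandle{_handles.back()};
      }

      void remove_handle(OopHandle h) {
        oop* ptr = h.ptr_raw();
        if (ptr != nullptr) {
          if (*ptr != nullptr) satb_enqueue(*ptr);  // pre-barrier on the root being cleared
          *ptr = nullptr;                           // the slot itself stays allocated, as in HotSpot
        }
      }
    };

    int main() {
      ClassLoaderDataSketch cld;
      int dummy;
      OopHandle h = cld.add_handle(&dummy);
      h.resolve();              // reads the stored oop
      cld.remove_handle(h);     // clears it, notifying the GC first
      return 0;
    }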
@@ -85,6 +85,7 @@ DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass)
 
 void Dictionary::free_entry(DictionaryEntry* entry) {
   // avoid recursion when deleting linked list
+  // pd_set is accessed during a safepoint.
   while (entry->pd_set() != NULL) {
     ProtectionDomainEntry* to_delete = entry->pd_set();
     entry->set_pd_set(to_delete->next());
@@ -101,7 +102,7 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
   if (protection_domain == instance_klass()->protection_domain()) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set_acquire();
          current != NULL;
          current = current->next()) {
       if (current->protection_domain() == protection_domain) {
@@ -121,7 +122,7 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
     return true;
   }
 
-  for (ProtectionDomainEntry* current = _pd_set;
+  for (ProtectionDomainEntry* current = pd_set_acquire();
        current != NULL;
        current = current->next()) {
     if (current->protection_domain() == protection_domain) return true;
@@ -135,12 +136,12 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, Handle protection_
   if (!contains_protection_domain(protection_domain())) {
     ProtectionDomainCacheEntry* entry = SystemDictionary::cache_get(protection_domain);
     ProtectionDomainEntry* new_head =
-      new ProtectionDomainEntry(entry, _pd_set);
+      new ProtectionDomainEntry(entry, pd_set());
     // Warning: Preserve store ordering.  The SystemDictionary is read
     //          without locks.  The new ProtectionDomainEntry must be
     //          complete before other threads can be allowed to see it
     //          via a store to _pd_set.
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
+    release_set_pd_set(new_head);
   }
   LogTarget(Trace, protectiondomain) lt;
   if (lt.is_enabled()) {
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/ostream.hpp"
 
@@ -134,7 +135,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
   // It is essentially a cache to avoid repeated Java up-calls to
   // ClassLoader.checkPackageAccess().
   //
-  ProtectionDomainEntry* _pd_set;
+  ProtectionDomainEntry* volatile _pd_set;
 
  public:
   // Tells whether a protection is in the approved set.
@@ -153,8 +154,15 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
     return (DictionaryEntry**)HashtableEntry<InstanceKlass*, mtClass>::next_addr();
   }
 
-  ProtectionDomainEntry* pd_set() const { return _pd_set; }
-  void set_pd_set(ProtectionDomainEntry* pd_set) { _pd_set = pd_set; }
+  ProtectionDomainEntry* pd_set() const { return _pd_set; }
+  void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
+
+  ProtectionDomainEntry* pd_set_acquire() const {
+    return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+  }
+  void release_set_pd_set(ProtectionDomainEntry* new_head) {
+    OrderAccess::release_store_ptr(&_pd_set, new_head);
+  }
 
   // Tells whether the initiating class' protection domain can access the klass in this entry
   bool is_valid_protection_domain(Handle protection_domain) {
@@ -167,7 +175,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
   }
 
   void verify_protection_domain_set() {
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
          current != NULL;
         current = current->_next) {
       current->_pd_cache->protection_domain()->verify();
@@ -181,7 +189,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
 
   void print_count(outputStream *st) {
     int count = 0;
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set(); // accessed inside SD lock
          current != NULL;
          current = current->_next) {
       count++;
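The release_set_pd_set / pd_set_acquire pair added to DictionaryEntry is the standard publication idiom for a lock-free reader: a new ProtectionDomainEntry must be fully constructed before its pointer becomes visible to threads that walk the list without taking the SystemDictionary lock, which is exactly what the next hunk starts relying on. A condensed sketch of the pattern with standard atomics rather than the OrderAccess API:

    #include <atomic>

    struct ProtectionDomainEntry {
      const void*            protection_domain;
      ProtectionDomainEntry* next;
    };

    std::atomic<ProtectionDomainEntry*> pd_set_head{nullptr};

    // Writer (runs under the SystemDictionary lock): build the node completely,
    // then publish it with release semantics -- the release_set_pd_set() side.
    void add_protection_domain(const void* pd) {
      ProtectionDomainEntry* e =
          new ProtectionDomainEntry{pd, pd_set_head.load(std::memory_order_relaxed)};
      pd_set_head.store(e, std::memory_order_release);
    }

    // Reader (no lock): the acquire load pairs with the release store above, so the
    // fields of every reachable entry are visible -- the pd_set_acquire() side.
    bool contains_protection_domain(const void* pd) {
      for (ProtectionDomainEntry* e = pd_set_head.load(std::memory_order_acquire);
           e != nullptr; e = e->next) {
        if (e->protection_domain == pd) return true;
      }
      return false;
    }

    int main() {
      add_protection_domain(&pd_set_head);            // any non-null pointer works for the demo
      return contains_protection_domain(&pd_set_head) ? 0 : 1;
    }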
@@ -910,12 +910,9 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
   if (protection_domain() == NULL) return k;
 
   // Check the protection domain has the right access
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
-                                               protection_domain)) {
-      return k;
-    }
-  }
+  if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
+                                             protection_domain)) {
+    return k;
+  }
 
   // Verify protection domain. If it fails an exception is thrown
@@ -1220,7 +1220,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   // for stack scanning.
   if (state == not_entrant) {
     mark_as_seen_on_stack();
-    OrderAccess::storestore();
+    OrderAccess::storestore(); // _stack_traversal_mark and _state
   }
 
   // Change state
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@ class nmethod : public CompiledMethod {
   // stack. An not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
-  volatile jlong _stack_traversal_mark;
+  volatile long _stack_traversal_mark;
 
   // The _hotness_counter indicates the hotness of a method. The higher
   // the value the hotter the method. The hotness counter of a nmethod is
@@ -396,8 +396,8 @@ public:
  public:
 
   // Sweeper support
-  jlong stack_traversal_mark()            { return OrderAccess::load_acquire(&_stack_traversal_mark); }
-  void  set_stack_traversal_mark(jlong l) { OrderAccess::release_store(&_stack_traversal_mark, l); }
+  long  stack_traversal_mark()            { return _stack_traversal_mark; }
+  void  set_stack_traversal_mark(long l)  { _stack_traversal_mark = l; }
 
   // implicit exceptions support
   address continuation_for_implicit_exception(address pc);
@@ -1719,7 +1719,6 @@ jint G1CollectedHeap::initialize() {
                          G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
                          G1BlockOffsetTable::heap_map_factor());
 
-  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
   G1RegionToSpaceMapper* cardtable_storage =
     create_aux_memory_mapper("Card Table",
                              G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
@@ -89,8 +89,6 @@ ConstantPool::ConstantPool(Array<u1>* tags) :
 
 void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
   if (cache() != NULL) {
-    MetadataFactory::free_array<u2>(loader_data, reference_map());
-    set_reference_map(NULL);
     MetadataFactory::free_metadata(loader_data, cache());
     set_cache(NULL);
   }
@@ -26,6 +26,7 @@
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/log.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
@@ -608,6 +609,14 @@ void ConstantPoolCache::initialize(const intArray& inverse_index_map,
   }
 }
 
+void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
+  assert(!is_shared(), "shared caches are not deallocated");
+  data->remove_handle(_resolved_references);
+  set_resolved_references(NULL);
+  MetadataFactory::free_array<u2>(data, _reference_map);
+  set_reference_map(NULL);
+}
+
 #if INCLUDE_CDS_JAVA_HEAP
 oop ConstantPoolCache::archived_references() {
   assert(UseSharedSpaces, "UseSharedSpaces expected.");
@@ -510,9 +510,9 @@ class ConstantPoolCache: public MetaspaceObj {
   void dump_cache();
 #endif // INCLUDE_JVMTI
 
-  // Deallocate - no fields to deallocate
+  // RedefineClasses support
   DEBUG_ONLY(bool on_stack() { return false; })
-  void deallocate_contents(ClassLoaderData* data) {}
+  void deallocate_contents(ClassLoaderData* data);
   bool is_klass() const { return false; }
 
   // Printing
@@ -46,6 +46,9 @@ public:
   OopHandle(oop* w) : _obj(w) {}
 
   oop resolve() const { return (_obj == NULL) ? (oop)NULL : *_obj; }
+
+  // Used only for removing handle.
+  oop* ptr_raw() { return _obj; }
 };
 
 #endif // SHARE_VM_OOPS_OOPHANDLE_HPP
@@ -53,7 +53,7 @@ class SweeperRecord {
  public:
   int traversal;
   int compile_id;
-  jlong traversal_mark;
+  long traversal_mark;
   int state;
   const char* kind;
   address vep;
@@ -62,7 +62,7 @@ class SweeperRecord {
 
   void print() {
       tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
-                    PTR_FORMAT " state = %d traversal_mark "JLONG_FORMAT" line = %d",
+                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                     traversal,
                     compile_id,
                     kind == NULL ? "" : kind,
@@ -629,6 +629,7 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(Compil
   } else if (cm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
+    OrderAccess::loadload(); // _stack_traversal_mark and _state
    if (cm->can_convert_to_zombie()) {
       // Clear ICStubs to prevent back patching stubs of zombie or flushed
       // nmethods during the next safepoint (see ICStub::finalize).
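With _stack_traversal_mark reverted to a plain long, the ordering that load_acquire/release_store used to provide is now carried by the explicit fences added in nmethod.cpp and here: make_not_entrant_or_zombie writes the mark, issues storestore(), then publishes the new _state, while the sweeper reads _state, issues loadload(), then reads the mark. A minimal sketch of that fence pairing, approximated with standard atomics in place of OrderAccess (toy fields, not the nmethod layout):

    #include <atomic>

    // Toy stand-ins for the two nmethod fields involved (volatile long / int in HotSpot;
    // relaxed atomics here so the sketch stays race-free in standard C++).
    std::atomic<long> stack_traversal_mark{0};
    std::atomic<int>  state{0};                  // 0 = in_use, 1 = not_entrant

    // Writer side (make_not_entrant_or_zombie): publish the mark before the state change.
    void make_not_entrant(long current_traversal) {
      stack_traversal_mark.store(current_traversal, std::memory_order_relaxed);  // mark_as_seen_on_stack()
      std::atomic_thread_fence(std::memory_order_release);                       // ~ OrderAccess::storestore()
      state.store(1, std::memory_order_relaxed);                                 // "Change state"
    }

    // Reader side (the sweeper): only trust the mark after seeing the new state.
    bool can_convert_to_zombie(long sweep_traversal) {
      if (state.load(std::memory_order_relaxed) != 1) return false;              // is_not_entrant()
      std::atomic_thread_fence(std::memory_order_acquire);                       // ~ OrderAccess::loadload()
      return stack_traversal_mark.load(std::memory_order_relaxed) < sweep_traversal;
    }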
@@ -841,7 +841,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
   nonstatic_field(nmethod, _verified_entry_point, address) \
   nonstatic_field(nmethod, _osr_entry_point, address) \
   volatile_nonstatic_field(nmethod, _lock_count, jint) \
-  volatile_nonstatic_field(nmethod, _stack_traversal_mark, jlong) \
+  volatile_nonstatic_field(nmethod, _stack_traversal_mark, long) \
   nonstatic_field(nmethod, _compile_id, int) \
   nonstatic_field(nmethod, _comp_level, int) \
   \