8232604: ZGC: Make ZVerifyViews mapping and unmapping precise
Reviewed-by: pliden, eosterlund
parent ecb66d2c55
commit 5e549ca805
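This change replaces the coarse ZHeap::before_flip()/after_flip() hooks, which unmapped the entire heap view and then remapped the page table and page cache, with a ZVerifyViewsFlip RAII guard plus a ZPageClosure-based pages_do() iteration over the page table, the page cache, and the satisfied allocation requests, so only pages that actually exist are unmapped and remapped. A minimal sketch of the new pattern, reconstructed from the ZHeap and ZVerifyViewsFlip hunks below (simplified, not verbatim source):

    // ZVerifyViews-only unmapping/remapping is now scoped by an RAII guard
    // around the address view flip.
    void ZHeap::flip_to_marked() {
      ZVerifyViewsFlip flip(&_page_allocator); // ctor: unmap each page (if ZVerifyViews)
      ZAddress::flip_to_marked();
    }                                          // dtor: map each page again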
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,10 @@ private:
T _value;

public:
ZFuture();

void set(T value);
T peek();
T get();
};

@@ -28,6 +28,10 @@
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"

template <typename T>
inline ZFuture<T>::ZFuture() :
_value() {}

template <typename T>
inline void ZFuture<T>::set(T value) {
// Set value
@@ -37,6 +41,11 @@ inline void ZFuture<T>::set(T value) {
_sema.signal();
}

template <typename T>
inline T ZFuture<T>::peek() {
return _value;
}

template <typename T>
inline T ZFuture<T>::get() {
// Wait for notification
@@ -244,34 +244,14 @@ uint64_t ZHeap::uncommit(uint64_t delay) {
return _page_allocator.uncommit(delay);
}

void ZHeap::before_flip() {
if (ZVerifyViews) {
// Unmap all pages
_page_allocator.debug_unmap_all_pages();
}
}

void ZHeap::after_flip() {
if (ZVerifyViews) {
// Map all pages
ZPageTableIterator iter(&_page_table);
for (ZPage* page; iter.next(&page);) {
_page_allocator.debug_map_page(page);
}
_page_allocator.debug_map_cached_pages();
}
}

void ZHeap::flip_to_marked() {
before_flip();
ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_marked();
after_flip();
}

void ZHeap::flip_to_remapped() {
before_flip();
ZVerifyViewsFlip flip(&_page_allocator);
ZAddress::flip_to_remapped();
after_flip();
}

void ZHeap::mark_start() {
@@ -460,6 +440,14 @@ void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
iter.objects_do(cl, visit_weaks);
}

void ZHeap::pages_do(ZPageClosure* cl) {
ZPageTableIterator iter(&_page_table);
for (ZPage* page; iter.next(&page);) {
cl->do_page(page);
}
_page_allocator.pages_do(cl);
}

void ZHeap::serviceability_initialize() {
_serviceability.initialize();
}
@@ -63,9 +63,6 @@ private:
size_t heap_max_size() const;
size_t heap_max_reserve_size() const;

void before_flip();
void after_flip();

void flip_to_marked();
void flip_to_remapped();

@@ -151,6 +148,7 @@ public:

// Iteration
void object_iterate(ObjectClosure* cl, bool visit_weaks);
void pages_do(ZPageClosure* cl);

// Serviceability
void serviceability_initialize();
@@ -112,4 +112,9 @@ public:
void print() const;
};

class ZPageClosure {
public:
virtual void do_page(const ZPage* page) = 0;
};

#endif // SHARE_GC_Z_ZPAGE_HPP
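The new ZPageClosure interface above, together with ZHeap::pages_do() and ZPageAllocator::pages_do() elsewhere in this commit, is how the verification code now visits exactly the pages that exist. A hypothetical closure (not part of this commit) illustrating the API:

    // Illustrative only: tally the pages visible through pages_do().
    class ZCountPagesClosure : public ZPageClosure {
    private:
      size_t _npages;
      size_t _bytes;

    public:
      ZCountPagesClosure() : _npages(0), _bytes(0) {}

      virtual void do_page(const ZPage* page) {
        _npages++;
        _bytes += page->size();
      }

      size_t npages() const { return _npages; }
      size_t bytes() const { return _bytes; }
    };

    // Usage, e.g. at a safepoint:
    //   ZCountPagesClosure cl;
    //   ZHeap::heap()->pages_do(&cl);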
@@ -60,7 +60,9 @@ public:
_type(type),
_size(size),
_flags(flags),
_total_collections(total_collections) {}
_total_collections(total_collections),
_node(),
_result() {}

uint8_t type() const {
return _type;
@@ -78,6 +80,10 @@ public:
return _total_collections;
}

ZPage* peek() {
return _result.peek();
}

ZPage* wait() {
return _result.get();
}
@@ -108,6 +114,7 @@ ZPageAllocator::ZPageAllocator(size_t min_capacity,
_allocated(0),
_reclaimed(0),
_queue(),
_satisfied(),
_safe_delete(),
_uncommit(false),
_initialized(false) {
@@ -289,11 +296,7 @@ void ZPageAllocator::destroy_page(ZPage* page) {

void ZPageAllocator::map_page(const ZPage* page) const {
// Map physical memory
if (!page->is_mapped()) {
_physical.map(page->physical_memory(), page->start());
} else if (ZVerifyViews) {
_physical.debug_map(page->physical_memory(), page->start());
}
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
@@ -433,14 +436,21 @@ ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocatio
} while (page == gc_marker);

{
// Guard deletion of underlying semaphore. This is a workaround for a
// bug in sem_post() in glibc < 2.21, where it's not safe to destroy
//
// We grab the lock here for two different reasons:
//
// 1) Guard deletion of underlying semaphore. This is a workaround for
// a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
// the semaphore immediately after returning from sem_wait(). The
// reason is that sem_post() can touch the semaphore after a waiting
// thread have returned from sem_wait(). To avoid this race we are
// forcing the waiting thread to acquire/release the lock held by the
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
//
// 2) Guard the list of satisfied pages.
//
ZLocker<ZLock> locker(&_lock);
_satisfied.remove(&request);
}
}

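The expanded comment above describes why the waiter re-acquires _lock after its wait returns: the posting thread signals the semaphore while holding _lock, so acquiring (and immediately releasing) _lock guarantees sem_post() has returned before the request, and the semaphore owned by its ZFuture, is deallocated. A stand-alone sketch of that handshake, using hypothetical names and plain POSIX/std primitives rather than the ZGC types (assumes the semaphore was initialized with sem_init() when the request was created):

    #include <mutex>
    #include <semaphore.h>

    struct Request {
      sem_t sema;   // lives and dies with the request
      int   value;
    };

    std::mutex lock;  // plays the role of _lock, shared by poster and waiter

    void post(Request* req, int v) {
      std::lock_guard<std::mutex> guard(lock);
      req->value = v;
      sem_post(&req->sema);  // glibc < 2.21 may still touch the semaphore here
    }                        // lock is released only after sem_post() has returned

    int wait_and_delete(Request* req) {
      sem_wait(&req->sema);
      {
        // Block until the poster has left sem_post(), so deleting the
        // request (and its semaphore) below cannot race with it.
        std::lock_guard<std::mutex> guard(lock);
      }
      const int v = req->value;
      sem_destroy(&req->sema);
      delete req;
      return v;
    }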
@@ -462,7 +472,9 @@ ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags fl
}

// Map page if needed
if (!page->is_mapped()) {
map_page(page);
}

// Reset page. This updates the page's sequence number and must
// be done after page allocation, which potentially blocked in
@@ -500,6 +512,7 @@ void ZPageAllocator::satisfy_alloc_queue() {
// the dequeue operation must happen first, since the request
// will immediately be deallocated once it has been satisfied.
_queue.remove(request);
_satisfied.insert_first(request);
request->satisfy(page);
}
}
@@ -686,28 +699,21 @@ void ZPageAllocator::debug_map_page(const ZPage* page) const {
_physical.debug_map(page->physical_memory(), page->start());
}

class ZPageCacheDebugMapClosure : public StackObj {
private:
const ZPageAllocator* const _allocator;

public:
ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
_allocator(allocator) {}

virtual void do_page(const ZPage* page) {
_allocator->debug_map_page(page);
}
};

void ZPageAllocator::debug_map_cached_pages() const {
void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZPageCacheDebugMapClosure cl(this);
_cache.pages_do(&cl);
_physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_all_pages() const {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
_physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
ZListIterator<ZPageAllocRequest> iter(&_satisfied);
for (ZPageAllocRequest* request; iter.next(&request);) {
const ZPage* const page = request->peek();
if (page != NULL) {
cl->do_page(page);
}
}

_cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
@@ -728,7 +734,8 @@ void ZPageAllocator::check_out_of_memory() {
}

// Out of memory, fail allocation request
_queue.remove_first();
_queue.remove(request);
_satisfied.insert_first(request);
request->satisfy(NULL);
}
}
@@ -54,6 +54,7 @@ private:
size_t _allocated;
ssize_t _reclaimed;
ZList<ZPageAllocRequest> _queue;
ZList<ZPageAllocRequest> _satisfied;
mutable ZSafeDelete<ZPage> _safe_delete;
bool _uncommit;
bool _initialized;
@@ -117,11 +118,12 @@ public:
void map_page(const ZPage* page) const;

void debug_map_page(const ZPage* page) const;
void debug_map_cached_pages() const;
void debug_unmap_all_pages() const;
void debug_unmap_page(const ZPage* page) const;

bool is_alloc_stalled() const;
void check_out_of_memory();

void pages_do(ZPageClosure* cl) const;
};

#endif // SHARE_GC_Z_ZPAGEALLOCATOR_HPP
@@ -240,3 +240,26 @@ void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
flush_list(cl, &_medium, to);
flush_per_numa_lists(cl, &_small, to);
}

void ZPageCache::pages_do(ZPageClosure* cl) const {
// Small
ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
for (const ZList<ZPage>* list; iter_numa.next(&list);) {
ZListIterator<ZPage> iter_small(list);
for (ZPage* page; iter_small.next(&page);) {
cl->do_page(page);
}
}

// Medium
ZListIterator<ZPage> iter_medium(&_medium);
for (ZPage* page; iter_medium.next(&page);) {
cl->do_page(page);
}

// Large
ZListIterator<ZPage> iter_large(&_large);
for (ZPage* page; iter_large.next(&page);) {
cl->do_page(page);
}
}
@@ -71,7 +71,7 @@ public:

void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);

template <typename Closure> void pages_do(Closure* cl) const;
void pages_do(ZPageClosure* cl) const;
};

#endif // SHARE_GC_Z_ZPAGECACHE_HPP
@@ -32,28 +32,4 @@ inline size_t ZPageCache::available() const {
return _available;
}

template <typename Closure>
inline void ZPageCache::pages_do(Closure* cl) const {
// Small
ZPerNUMAConstIterator<ZList<ZPage> > iter_numa(&_small);
for (const ZList<ZPage>* list; iter_numa.next(&list);) {
ZListIterator<ZPage> iter_small(list);
for (ZPage* page; iter_small.next(&page);) {
cl->do_page(page);
}
}

// Medium
ZListIterator<ZPage> iter_medium(&_medium);
for (ZPage* page; iter_medium.next(&page);) {
cl->do_page(page);
}

// Large
ZListIterator<ZPage> iter_large(&_large);
for (ZPage* page; iter_large.next(&page);) {
cl->do_page(page);
}
}

#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
@@ -26,6 +26,7 @@
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
@@ -170,3 +171,38 @@ void ZVerify::after_weak_processing() {
ZStatTimerDisable disable;
roots_and_objects(true /* verify_weaks */);
}

template <bool Map>
class ZPageDebugMapOrUnmapClosure : public ZPageClosure {
private:
const ZPageAllocator* const _allocator;

public:
ZPageDebugMapOrUnmapClosure(const ZPageAllocator* allocator) :
_allocator(allocator) {}

void do_page(const ZPage* page) {
if (Map) {
_allocator->debug_map_page(page);
} else {
_allocator->debug_unmap_page(page);
}
}
};

ZVerifyViewsFlip::ZVerifyViewsFlip(const ZPageAllocator* allocator) :
_allocator(allocator) {
if (ZVerifyViews) {
// Unmap all pages
ZPageDebugMapOrUnmapClosure<false /* Map */> cl(_allocator);
ZHeap::heap()->pages_do(&cl);
}
}

ZVerifyViewsFlip::~ZVerifyViewsFlip() {
if (ZVerifyViews) {
// Map all pages
ZPageDebugMapOrUnmapClosure<true /* Map */> cl(_allocator);
ZHeap::heap()->pages_do(&cl);
}
}
@@ -26,6 +26,8 @@

#include "memory/allocation.hpp"

class ZPageAllocator;

class ZVerify : public AllStatic {
private:
template <typename RootsIterator> static void roots();
@@ -45,4 +47,13 @@ public:
static void after_weak_processing();
};

class ZVerifyViewsFlip {
private:
const ZPageAllocator* const _allocator;

public:
ZVerifyViewsFlip(const ZPageAllocator* allocator);
~ZVerifyViewsFlip();
};

#endif // SHARE_GC_Z_ZVERIFY_HPP