8265127: ZGC: Fix incorrect reporting of reclaimed memory
Reviewed-by: ayang, sjohanss
parent 1580a473d0
commit a9367dbd84
@@ -429,7 +429,7 @@ void ZHeap::relocate() {
   _relocate.relocate(&_relocation_set);
 
   // Update statistics
-  ZStatHeap::set_at_relocate_end(_page_allocator.stats());
+  ZStatHeap::set_at_relocate_end(_page_allocator.stats(), _object_allocator.relocated());
 }
 
 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
@@ -114,8 +114,8 @@ public:
   // Object allocation
   uintptr_t alloc_tlab(size_t size);
   uintptr_t alloc_object(size_t size);
-  uintptr_t alloc_object_non_blocking(size_t size);
-  void undo_alloc_object(uintptr_t addr, size_t size);
+  uintptr_t alloc_object_for_relocation(size_t size);
+  void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
   bool is_alloc_stalled() const;
   void check_out_of_memory();
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,15 +79,15 @@ inline uintptr_t ZHeap::alloc_object(size_t size) {
   return addr;
 }
 
-inline uintptr_t ZHeap::alloc_object_non_blocking(size_t size) {
-  uintptr_t addr = _object_allocator.alloc_object_non_blocking(size);
+inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {
+  const uintptr_t addr = _object_allocator.alloc_object_for_relocation(&_page_table, size);
   assert(ZAddress::is_good_or_null(addr), "Bad address");
   return addr;
 }
 
-inline void ZHeap::undo_alloc_object(uintptr_t addr, size_t size) {
+inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
   ZPage* const page = _page_table.get(addr);
-  _object_allocator.undo_alloc_object(page, addr, size);
+  _object_allocator.undo_alloc_object_for_relocation(page, addr, size);
 }
 
 inline uintptr_t ZHeap::relocate_object(uintptr_t addr) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "gc/z/zHeuristics.hpp"
 #include "gc/z/zObjectAllocator.hpp"
 #include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageTable.inline.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zThread.inline.hpp"
 #include "gc/z/zValue.inline.hpp"
@@ -43,6 +44,8 @@ ZObjectAllocator::ZObjectAllocator() :
     _use_per_cpu_shared_small_pages(ZHeuristics::use_per_cpu_shared_small_pages()),
     _used(0),
     _undone(0),
+    _alloc_for_relocation(0),
+    _undo_alloc_for_relocation(0),
     _shared_medium_page(NULL),
     _shared_small_page(NULL) {}
 
@@ -54,6 +57,17 @@ ZPage* const* ZObjectAllocator::shared_small_page_addr() const {
   return _use_per_cpu_shared_small_pages ? _shared_small_page.addr() : _shared_small_page.addr(0);
 }
 
+void ZObjectAllocator::register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size) {
+  const ZPage* const page = page_table->get(addr);
+  const size_t aligned_size = align_up(size, page->object_alignment());
+  Atomic::add(_alloc_for_relocation.addr(), aligned_size);
+}
+
+void ZObjectAllocator::register_undo_alloc_for_relocation(const ZPage* page, size_t size) {
+  const size_t aligned_size = align_up(size, page->object_alignment());
+  Atomic::add(_undo_alloc_for_relocation.addr(), aligned_size);
+}
+
 ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
   ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
   if (page != NULL) {
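A note on the two register_* helpers added above: the size is rounded up to the page's object alignment before it is atomically added to a per-CPU counter, so the recorded byte count matches the space the allocation actually occupies in the page. A minimal standalone sketch of that rounding step (plain C++; align_up_pow2 is a hypothetical stand-in for HotSpot's align_up, and the 8-byte alignment is just an illustrative value):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Round size up to the next multiple of alignment (alignment must be a
// power of two); the same rounding align_up() applies to the recorded bytes.
static size_t align_up_pow2(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  // With an 8-byte object alignment, a 13-byte request occupies 16 bytes
  // in the page, so 16 bytes is what gets added to the counter.
  assert(align_up_pow2(13, 8) == 16);
  assert(align_up_pow2(16, 8) == 16);
  printf("13 -> %zu, 16 -> %zu\n", align_up_pow2(13, 8), align_up_pow2(16, 8));
  return 0;
}

Keeping the counters per CPU (ZPerCPU) avoids contention between threads relocating objects concurrently; the slots are only combined in relocated(), shown further down.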
@@ -160,20 +174,28 @@ uintptr_t ZObjectAllocator::alloc_object(size_t size) {
   return alloc_object(size, flags);
 }
 
-uintptr_t ZObjectAllocator::alloc_object_non_blocking(size_t size) {
+uintptr_t ZObjectAllocator::alloc_object_for_relocation(const ZPageTable* page_table, size_t size) {
   ZAllocationFlags flags;
   flags.set_non_blocking();
-  return alloc_object(size, flags);
+
+  const uintptr_t addr = alloc_object(size, flags);
+  if (addr != 0) {
+    register_alloc_for_relocation(page_table, addr, size);
+  }
+
+  return addr;
 }
 
-void ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
+void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
   const uint8_t type = page->type();
 
   if (type == ZPageTypeLarge) {
+    register_undo_alloc_for_relocation(page, size);
     undo_alloc_page(page);
     ZStatInc(ZCounterUndoObjectAllocationSucceeded);
   } else {
     if (page->undo_alloc_object_atomic(addr, size)) {
+      register_undo_alloc_for_relocation(page, size);
       ZStatInc(ZCounterUndoObjectAllocationSucceeded);
     } else {
       ZStatInc(ZCounterUndoObjectAllocationFailed);
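alloc_object_for_relocation() keeps the old non-blocking behaviour (a failed allocation returns 0 rather than stalling the caller) and records the bytes only when the allocation succeeded. A standalone sketch of that allocate-then-register-on-success shape (hypothetical single-threaded stand-ins, not the HotSpot code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static size_t relocation_bytes = 0;  // accounting, updated only on success

// Hypothetical non-blocking allocator: returns 0 when no memory is available
// instead of waiting for the GC to free some.
static uintptr_t try_alloc(size_t size, bool out_of_memory) {
  (void)size;
  return out_of_memory ? 0 : 0x1000;  // fake address on success
}

static uintptr_t alloc_for_relocation(size_t size, bool out_of_memory) {
  const uintptr_t addr = try_alloc(size, out_of_memory);
  if (addr != 0) {
    relocation_bytes += size;  // mirrors register_alloc_for_relocation()
  }
  return addr;  // 0 means the relocation allocation failed
}

int main() {
  alloc_for_relocation(64, /*out_of_memory=*/false);  // recorded
  alloc_for_relocation(32, /*out_of_memory=*/true);   // fails, not recorded
  printf("recorded %zu bytes\n", relocation_bytes);   // prints 64
  return 0;
}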
@@ -209,6 +231,25 @@ size_t ZObjectAllocator::remaining() const {
   return 0;
 }
 
+size_t ZObjectAllocator::relocated() const {
+  size_t total_alloc = 0;
+  size_t total_undo_alloc = 0;
+
+  ZPerCPUConstIterator<size_t> iter_alloc(&_alloc_for_relocation);
+  for (const size_t* alloc; iter_alloc.next(&alloc);) {
+    total_alloc += Atomic::load(alloc);
+  }
+
+  ZPerCPUConstIterator<size_t> iter_undo_alloc(&_undo_alloc_for_relocation);
+  for (const size_t* undo_alloc; iter_undo_alloc.next(&undo_alloc);) {
+    total_undo_alloc += Atomic::load(undo_alloc);
+  }
+
+  assert(total_alloc >= total_undo_alloc, "Mismatch");
+
+  return total_alloc - total_undo_alloc;
+}
+
 void ZObjectAllocator::retire_pages() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
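relocated() folds the per-CPU slots into two totals and reports their difference: bytes allocated for relocation minus bytes handed back by undone relocations. A minimal standalone sketch of the same bookkeeping, with a fixed array of std::atomic counters standing in for ZPerCPU and Atomic (names and slot count are hypothetical):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-ins for ZPerCPU<size_t>: one relaxed atomic counter per "CPU" slot.
static const int kSlots = 4;
static std::atomic<size_t> alloc_for_relocation[kSlots];
static std::atomic<size_t> undo_alloc_for_relocation[kSlots];

static void record_alloc(int slot, size_t bytes) {
  alloc_for_relocation[slot].fetch_add(bytes, std::memory_order_relaxed);
}

static void record_undo(int slot, size_t bytes) {
  undo_alloc_for_relocation[slot].fetch_add(bytes, std::memory_order_relaxed);
}

// Mirrors ZObjectAllocator::relocated(): sum every slot, subtract the undos.
static size_t relocated() {
  size_t total_alloc = 0;
  size_t total_undo = 0;
  for (int i = 0; i < kSlots; i++) {
    total_alloc += alloc_for_relocation[i].load(std::memory_order_relaxed);
    total_undo += undo_alloc_for_relocation[i].load(std::memory_order_relaxed);
  }
  assert(total_alloc >= total_undo && "an undo always follows a recorded alloc");
  return total_alloc - total_undo;
}

int main() {
  record_alloc(0, 64);   // a thread on CPU 0 relocates a 64-byte object
  record_alloc(1, 128);  // a thread on CPU 1 allocates a 128-byte copy...
  record_undo(1, 128);   // ...but loses the race and gives it back
  printf("relocated: %zu bytes\n", relocated());  // prints 64
  return 0;
}

In the patch the counters are reset in retire_pages(), shown in the next hunk, so each cycle starts the accounting from zero.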
@@ -216,6 +257,10 @@ void ZObjectAllocator::retire_pages() {
   _used.set_all(0);
   _undone.set_all(0);
 
+  // Reset relocated bytes
+  _alloc_for_relocation.set_all(0);
+  _undo_alloc_for_relocation.set_all(0);
+
   // Reset allocation pages
   _shared_medium_page.set(NULL);
   _shared_small_page.set_all(NULL);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,18 +28,24 @@
 #include "gc/z/zValue.hpp"
 
 class ZPage;
+class ZPageTable;
 
 class ZObjectAllocator {
 private:
   const bool         _use_per_cpu_shared_small_pages;
   ZPerCPU<size_t>    _used;
   ZPerCPU<size_t>    _undone;
+  ZPerCPU<size_t>    _alloc_for_relocation;
+  ZPerCPU<size_t>    _undo_alloc_for_relocation;
   ZContended<ZPage*> _shared_medium_page;
   ZPerCPU<ZPage*>    _shared_small_page;
 
   ZPage** shared_small_page_addr();
   ZPage* const* shared_small_page_addr() const;
 
+  void register_alloc_for_relocation(const ZPageTable* page_table, uintptr_t addr, size_t size);
+  void register_undo_alloc_for_relocation(const ZPage* page, size_t size);
+
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void undo_alloc_page(ZPage* page);
 
@@ -60,11 +66,12 @@ public:
   ZObjectAllocator();
 
   uintptr_t alloc_object(size_t size);
-  uintptr_t alloc_object_non_blocking(size_t size);
-  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size);
+  uintptr_t alloc_object_for_relocation(const ZPageTable* page_table, size_t size);
+  void undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size);
 
   size_t used() const;
   size_t remaining() const;
+  size_t relocated() const;
 
   void retire_pages();
 };
@@ -64,7 +64,7 @@ static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_a
 
   // Allocate object
   const size_t size = ZUtils::object_size(from_addr);
-  const uintptr_t to_addr = ZHeap::heap()->alloc_object_non_blocking(size);
+  const uintptr_t to_addr = ZHeap::heap()->alloc_object_for_relocation(size);
   if (to_addr == 0) {
     // Allocation failed
     return 0;
@@ -77,7 +77,7 @@ static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_a
   const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
   if (to_addr_final != to_addr) {
     // Already relocated, try undo allocation
-    ZHeap::heap()->undo_alloc_object(to_addr, size);
+    ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
   }
 
   return to_addr_final;
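The undo call above runs when another thread wins the race to relocate the same object: forwarding_insert() returns the winner's address, and the loser's speculative to-space copy is handed back, which is why the undo must also be subtracted from the relocation byte count. A standalone sketch of that race, with a single std::atomic slot standing in for the forwarding table (a simplification, not ZGC's actual forwarding structure):

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uintptr_t> forwarding_slot{0};  // 0 means "not relocated yet"

// Returns the final to-address: ours if we installed it first, otherwise the
// address installed by whoever beat us to it.
static uintptr_t forwarding_insert(uintptr_t to_addr) {
  uintptr_t expected = 0;
  if (forwarding_slot.compare_exchange_strong(expected, to_addr)) {
    return to_addr;
  }
  return expected;
}

int main() {
  const uintptr_t their_copy = 0x2000;  // a competing thread's to-space copy
  const uintptr_t my_copy = 0x1000;     // our speculative to-space copy

  forwarding_insert(their_copy);        // the other thread wins the race
  const uintptr_t final_addr = forwarding_insert(my_copy);

  if (final_addr != my_copy) {
    // Already relocated: discard our copy and subtract its bytes from the
    // relocation accounting (the undo_alloc_object_for_relocation() call above).
    printf("lost the race, undoing copy at %#lx\n", (unsigned long)my_copy);
  }
  return 0;
}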
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@ public:
   uintptr_t relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const;
   uintptr_t forward_object(ZForwarding* forwarding, uintptr_t from_addr) const;
 
-  void start();
   void relocate(ZRelocationSet* relocation_set);
 };
 
@@ -1326,7 +1326,9 @@ void ZStatHeap::set_at_relocate_start(const ZPageAllocatorStats& stats) {
   _at_relocate_start.reclaimed = stats.reclaimed();
 }
 
-void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats) {
+void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated) {
+  const size_t reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed());
+
   _at_relocate_end.capacity = stats.capacity();
   _at_relocate_end.capacity_high = capacity_high();
   _at_relocate_end.capacity_low = capacity_low();
@@ -1336,9 +1338,9 @@ void ZStatHeap::set_at_relocate_end(const ZPageAllocatorStats& stats) {
   _at_relocate_end.used = stats.used();
   _at_relocate_end.used_high = stats.used_high();
   _at_relocate_end.used_low = stats.used_low();
-  _at_relocate_end.allocated = allocated(stats.used(), stats.reclaimed());
-  _at_relocate_end.garbage = garbage(stats.reclaimed());
-  _at_relocate_end.reclaimed = stats.reclaimed();
+  _at_relocate_end.allocated = allocated(stats.used(), reclaimed);
+  _at_relocate_end.garbage = garbage(reclaimed);
+  _at_relocate_end.reclaimed = reclaimed;
 }
 
 size_t ZStatHeap::max_capacity() {
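This is the heart of the fix: bytes relocated outside the GC workers (to-space copies allocated through ZObjectAllocator, which per the parameter name means non-worker, i.e. mutator, threads) are subtracted from the page allocator's reclaimed figure before it feeds the end-of-relocation statistics, and the subtraction is clamped with MIN2 so the unsigned value cannot wrap. A small worked example of the arithmetic (standalone sketch, illustrative numbers only):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Mirrors: reclaimed = stats.reclaimed() - MIN2(non_worker_relocated, stats.reclaimed())
static size_t adjusted_reclaimed(size_t raw_reclaimed, size_t non_worker_relocated) {
  return raw_reclaimed - std::min(non_worker_relocated, raw_reclaimed);
}

int main() {
  // 512 MB nominally reclaimed, but 96 MB of it was merely moved by
  // non-worker threads: report 416 MB.
  printf("%zu MB\n", adjusted_reclaimed(512, 96));
  // Clamping: if the relocated amount exceeds the raw figure, report 0
  // rather than a wrapped-around unsigned value.
  printf("%zu MB\n", adjusted_reclaimed(10, 64));
  return 0;
}

As the hunk above shows, the allocated and garbage figures are derived from the same adjusted value, so they shrink by the same amount.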
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -541,7 +541,7 @@ public:
   static void set_at_mark_end(const ZPageAllocatorStats& stats);
   static void set_at_select_relocation_set(const ZRelocationSetSelectorStats& stats);
   static void set_at_relocate_start(const ZPageAllocatorStats& stats);
-  static void set_at_relocate_end(const ZPageAllocatorStats& stats);
+  static void set_at_relocate_end(const ZPageAllocatorStats& stats, size_t non_worker_relocated);
 
   static size_t max_capacity();
   static size_t used_at_mark_start();