8220589: ZGC: Remove superfluous ZPageTableEntry

Reviewed-by: stefank, eosterlund
Author: Per Lidén
Date:   2019-03-18 11:50:39 +01:00
Commit: b29bc9f1da (parent: 0ce7c21d33)

12 changed files with 56 additions and 182 deletions
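For context when reading the hunks below: the ZPageTableEntry being removed (its full definition appears as the deleted header near the end of this diff) packed a relocating flag into bit 0 of a 64-bit word and kept the ZPage pointer in the remaining bits, which is why the gdb macros mask slots with & ~1. After this change every page-table slot is a plain ZPage*. The following is a minimal standalone sketch of the two representations, an illustration rather than the HotSpot code:

#include <cstdint>

struct ZPage;  // opaque stand-in for the real class

// Old layout (sketch of the deleted ZPageTableEntry): ZPage* values are at
// least 2-byte aligned, so bit 0 is free to carry the relocating flag.
class OldPageTableEntry {
private:
  uint64_t _entry;

public:
  OldPageTableEntry() : _entry(0) {}
  OldPageTableEntry(ZPage* page, bool relocating) :
      _entry(reinterpret_cast<uintptr_t>(page) | (relocating ? 1 : 0)) {}

  bool relocating() const { return (_entry & 1) != 0; }
  ZPage* page() const { return reinterpret_cast<ZPage*>(_entry & ~uint64_t(1)); }
};

// New layout: the slot is just the page pointer, so the table becomes a
// ZGranuleMap<ZPage*> and get() needs no decoding step.
typedef ZPage* NewPageTableEntry;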

View File

@@ -53,7 +53,7 @@ public:
const int* _ZObjectAlignmentSmall;
};
typedef ZGranuleMap<ZPageTableEntry> ZGranuleMapForPageTable;
typedef ZGranuleMap<ZPage*> ZGranuleMapForPageTable;
#define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field) \
static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
@@ -68,7 +68,7 @@ typedef ZGranuleMap<ZPageTableEntry> ZGranuleMapForPageTable;
nonstatic_field(ZCollectedHeap, _heap, ZHeap) \
\
nonstatic_field(ZHeap, _page_allocator, ZPageAllocator) \
nonstatic_field(ZHeap, _pagetable, ZPageTable) \
nonstatic_field(ZHeap, _page_table, ZPageTable) \
\
nonstatic_field(ZPage, _type, const uint8_t) \
nonstatic_field(ZPage, _seqnum, uint32_t) \
@@ -80,7 +80,7 @@ typedef ZGranuleMap<ZPageTableEntry> ZGranuleMapForPageTable;
\
nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
\
nonstatic_field(ZGranuleMapForPageTable, _map, ZPageTableEntry* const) \
nonstatic_field(ZGranuleMapForPageTable, _map, ZPage** const) \
\
nonstatic_field(ZVirtualMemory, _start, uintptr_t) \
nonstatic_field(ZVirtualMemory, _end, uintptr_t) \
@@ -115,7 +115,6 @@ typedef ZGranuleMap<ZPageTableEntry> ZGranuleMapForPageTable;
declare_toplevel_type(ZPage) \
declare_toplevel_type(ZPageAllocator) \
declare_toplevel_type(ZPageTable) \
declare_toplevel_type(ZPageTableEntry) \
declare_toplevel_type(ZGranuleMapForPageTable) \
declare_toplevel_type(ZVirtualMemory) \
declare_toplevel_type(ZForwardingTable) \

View File

@@ -53,17 +53,17 @@ define zpo
printf "Mark: 0x%016llx\tKlass: %s\n", (uintptr_t)$obj->_mark, (char*)$obj->_metadata->_klass->_name->_body
end
# Print heap page by pagetable index
# Print heap page by page table index
define zpp
set $page = (ZPage*)((uintptr_t)ZHeap::_heap._pagetable._map._map[($arg0)] & ~1)
set $page = (ZPage*)((uintptr_t)ZHeap::_heap._page_table._map._map[($arg0)] & ~1)
printf "Page %p\n", $page
print *$page
end
# Print pagetable
# Print page_table
define zpt
printf "Pagetable (first 128 slots)\n"
x/128gx ZHeap::_heap._pagetable._map._map
x/128gx ZHeap::_heap._page_table._map._map
end
# Print live map
@@ -100,7 +100,7 @@ define zmarked
set $addr = $arg0
set $obj = ((uintptr_t)$addr & ZAddressOffsetMask)
set $page_index = $obj >> ZGranuleSizeShift
set $page_entry = (uintptr_t)ZHeap::_heap._pagetable._map._map[$page_index]
set $page_entry = (uintptr_t)ZHeap::_heap._page_table._map._map[$page_index]
set $page = (ZPage*)($page_entry & ~1)
set $page_start = (uintptr_t)$page._virtual._start
set $page_end = (uintptr_t)$page._virtual._end
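The zpp and zmarked macros above derive the page-table slot for an address the same way: mask the colored pointer down to its heap offset and shift by the granule size. Below is a small standalone C++ sketch of that index computation; the 2M granule (shift of 21) and the 42-bit offset mask are assumptions made purely for illustration, as the real values come from ZGC's globals (ZGranuleSizeShift, ZAddressOffsetMask):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed constants, for illustration only (stand-ins for ZGranuleSizeShift
// and ZAddressOffsetMask).
static const uintptr_t granule_size_shift  = 21;                        // 2M granules
static const uintptr_t address_offset_mask = (uintptr_t(1) << 42) - 1;  // heap offset bits

// Strip the ZGC metadata/color bits and return the page-table slot covering
// the address.
static size_t page_table_index(uintptr_t colored_addr) {
  const uintptr_t offset = colored_addr & address_offset_mask;
  return offset >> granule_size_shift;
}

int main() {
  // An object three granules into the heap lands in slot 3.
  const uintptr_t addr = 3 * (uintptr_t(1) << granule_size_shift);
  printf("slot = %zu\n", page_table_index(addr));
  return 0;
}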

View File

@@ -63,9 +63,9 @@ ZHeap::ZHeap() :
_workers(),
_object_allocator(_workers.nworkers()),
_page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
_pagetable(),
_page_table(),
_forwarding_table(),
_mark(&_workers, &_pagetable),
_mark(&_workers, &_page_table),
_reference_processor(&_workers),
_weak_roots_processor(&_workers),
_relocate(&_workers),
@@ -173,7 +173,7 @@ bool ZHeap::is_in(uintptr_t addr) const {
return false;
}
const ZPage* const page = _pagetable.get(addr);
const ZPage* const page = _page_table.get(addr);
if (page != NULL) {
return page->is_in(addr);
}
@@ -182,12 +182,12 @@ bool ZHeap::is_in(uintptr_t addr) const {
}
uintptr_t ZHeap::block_start(uintptr_t addr) const {
const ZPage* const page = _pagetable.get(addr);
const ZPage* const page = _page_table.get(addr);
return page->block_start(addr);
}
bool ZHeap::block_is_obj(uintptr_t addr) const {
const ZPage* const page = _pagetable.get(addr);
const ZPage* const page = _page_table.get(addr);
return page->block_is_obj(addr);
}
@@ -221,8 +221,8 @@ void ZHeap::out_of_memory() {
ZPage* ZHeap::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
ZPage* const page = _page_allocator.alloc_page(type, size, flags);
if (page != NULL) {
// Update pagetable
_pagetable.insert(page);
// Update page table
_page_table.insert(page);
}
return page;
@@ -252,7 +252,7 @@ void ZHeap::before_flip() {
void ZHeap::after_flip() {
if (ZVerifyViews) {
// Map all pages
ZPageTableIterator iter(&_pagetable);
ZPageTableIterator iter(&_page_table);
for (ZPage* page; iter.next(&page);) {
if (!page->is_detached()) {
_page_allocator.map_page(page);
@@ -406,8 +406,8 @@ void ZHeap::destroy_detached_pages() {
_page_allocator.flush_detached_pages(&list);
for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
// Remove pagetable entry
_pagetable.remove(page);
// Remove page table entry
_page_table.remove(page);
// Delete the page
_page_allocator.destroy_page(page);
@@ -417,7 +417,7 @@ void ZHeap::destroy_detached_pages() {
void ZHeap::select_relocation_set() {
// Register relocatable pages with selector
ZRelocationSetSelector selector;
ZPageTableIterator pt_iter(&_pagetable);
ZPageTableIterator pt_iter(&_page_table);
for (ZPage* page; pt_iter.next(&page);) {
if (!page->is_relocatable()) {
// Not relocatable, don't register
@@ -529,7 +529,7 @@ void ZHeap::print_extended_on(outputStream* st) const {
print_on(st);
st->cr();
ZPageTableIterator iter(&_pagetable);
ZPageTableIterator iter(&_page_table);
for (ZPage* page; iter.next(&page);) {
page->print_on(st);
}

View File

@@ -55,7 +55,7 @@ private:
ZWorkers _workers;
ZObjectAllocator _object_allocator;
ZPageAllocator _page_allocator;
ZPageTable _pagetable;
ZPageTable _page_table;
ZForwardingTable _forwarding_table;
ZMark _mark;
ZReferenceProcessor _reference_processor;

View File

@@ -45,12 +45,12 @@ inline ReferenceDiscoverer* ZHeap::reference_discoverer() {
}
inline bool ZHeap::is_object_live(uintptr_t addr) const {
ZPage* page = _pagetable.get(addr);
ZPage* page = _page_table.get(addr);
return page->is_object_live(addr);
}
inline bool ZHeap::is_object_strongly_live(uintptr_t addr) const {
ZPage* page = _pagetable.get(addr);
ZPage* page = _page_table.get(addr);
return page->is_object_strongly_live(addr);
}
@@ -83,7 +83,7 @@ inline uintptr_t ZHeap::alloc_object_for_relocation(size_t size) {
}
inline void ZHeap::undo_alloc_object_for_relocation(uintptr_t addr, size_t size) {
ZPage* const page = _pagetable.get(addr);
ZPage* const page = _page_table.get(addr);
_object_allocator.undo_alloc_object_for_relocation(page, addr, size);
}

View File

@@ -56,9 +56,9 @@ static const ZStatSubPhase ZSubPhaseConcurrentMarkIdle("Concurrent Mark Idle");
static const ZStatSubPhase ZSubPhaseConcurrentMarkTryTerminate("Concurrent Mark Try Terminate");
static const ZStatSubPhase ZSubPhaseMarkTryComplete("Pause Mark Try Complete");
ZMark::ZMark(ZWorkers* workers, ZPageTable* pagetable) :
ZMark::ZMark(ZWorkers* workers, ZPageTable* page_table) :
_workers(workers),
_pagetable(pagetable),
_page_table(page_table),
_allocator(),
_stripes(),
_terminate(),
@@ -307,7 +307,7 @@ void ZMark::follow_object(oop obj, bool finalizable) {
}
bool ZMark::try_mark_object(ZMarkCache* cache, uintptr_t addr, bool finalizable) {
ZPage* const page = _pagetable->get(addr);
ZPage* const page = _page_table->get(addr);
if (page->is_allocating()) {
// Newly allocated objects are implicitly marked
return false;

View File

@@ -42,7 +42,7 @@ class ZMark {
private:
ZWorkers* const _workers;
ZPageTable* const _pagetable;
ZPageTable* const _page_table;
ZMarkStackAllocator _allocator;
ZMarkStripeSet _stripes;
ZMarkTerminate _terminate;
@@ -101,7 +101,7 @@ private:
void verify_all_stacks_empty() const;
public:
ZMark(ZWorkers* workers, ZPageTable* pagetable);
ZMark(ZWorkers* workers, ZPageTable* page_table);
bool is_initialized() const;

View File

@@ -23,6 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageTable.inline.hpp"
#include "runtime/orderAccess.hpp"
@@ -31,65 +32,26 @@
ZPageTable::ZPageTable() :
_map() {}
ZPageTableEntry ZPageTable::get_entry(ZPage* page) const {
const uintptr_t addr = ZAddress::good(page->start());
return _map.get(addr);
}
void ZPageTable::put_entry(ZPage* page, ZPageTableEntry entry) {
// Make sure a newly created page is globally visible before
// updating the pagetable.
OrderAccess::storestore();
const uintptr_t start = ZAddress::good(page->start());
const uintptr_t end = start + page->size();
for (uintptr_t addr = start; addr < end; addr += ZGranuleSize) {
_map.put(addr, entry);
}
}
void ZPageTable::insert(ZPage* page) {
assert(get_entry(page).page() == NULL ||
get_entry(page).page() == page, "Invalid entry");
const uintptr_t addr = ZAddress::good(page->start());
const size_t size = page->size();
// Cached pages stays in the pagetable and we must not re-insert
// those when they get re-allocated because they might also be
// relocating and we don't want to clear their relocating bit.
if (get_entry(page).page() == NULL) {
ZPageTableEntry entry(page, false /* relocating */);
put_entry(page, entry);
// Cached pages stays in the page table.
// Don't re-insert if it's already present.
if (get(addr) == NULL) {
// Make sure a newly created page is
// visible before updating the page table.
OrderAccess::storestore();
_map.put(addr, size, page);
}
assert(get_entry(page).page() == page, "Invalid entry");
assert(get(addr) == page, "Invalid entry");
}
void ZPageTable::remove(ZPage* page) {
assert(get_entry(page).page() == page, "Invalid entry");
const uintptr_t addr = ZAddress::good(page->start());
const size_t size = page->size();
ZPageTableEntry entry;
put_entry(page, entry);
assert(get_entry(page).page() == NULL, "Invalid entry");
}
void ZPageTable::set_relocating(ZPage* page) {
assert(get_entry(page).page() == page, "Invalid entry");
assert(!get_entry(page).relocating(), "Invalid entry");
ZPageTableEntry entry(page, true /* relocating */);
put_entry(page, entry);
assert(get_entry(page).page() == page, "Invalid entry");
assert(get_entry(page).relocating(), "Invalid entry");
}
void ZPageTable::clear_relocating(ZPage* page) {
assert(get_entry(page).page() == page, "Invalid entry");
assert(get_entry(page).relocating(), "Invalid entry");
ZPageTableEntry entry(page, false /* relocating */);
put_entry(page, entry);
assert(get_entry(page).page() == page, "Invalid entry");
assert(!get_entry(page).relocating(), "Invalid entry");
assert(get(addr) == page, "Invalid entry");
_map.put(addr, size, NULL);
}
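The rewritten insert() and remove() above hand the page's whole address range to a single ranged _map.put(addr, size, page) call, replacing the removed put_entry() loop that wrote one granule slot at a time. The ZGranuleMap side of that call is not part of this diff; the following is a rough standalone sketch of what such a ranged put presumably does, with the class name, the 2M granule size, and the use of plain offsets all being simplifying assumptions:

#include <cstddef>
#include <cstdint>

// Illustration only: a trimmed-down granule map with the ranged put() that
// the rewritten ZPageTable::insert()/remove() rely on. Indexed by plain heap
// offsets here, rather than the colored addresses the real code passes.
template <typename T>
class GranuleMapSketch {
private:
  static const size_t granule_shift = 21;  // assumed 2M granules

  T* _map;

public:
  explicit GranuleMapSketch(size_t max_offset) :
      _map(new T[(max_offset >> granule_shift) + 1]()) {}

  ~GranuleMapSketch() { delete[] _map; }

  size_t index_for(uintptr_t offset) const {
    return offset >> granule_shift;
  }

  T get(uintptr_t offset) const {
    return _map[index_for(offset)];
  }

  // Write the same value into every slot covered by [offset, offset + size),
  // so a multi-granule (medium or large) page can be looked up from any
  // address inside it. Passing NULL clears the range, as remove() does.
  void put(uintptr_t offset, size_t size, T value) {
    const size_t start = index_for(offset);
    const size_t end   = index_for(offset + size - 1);
    for (size_t i = start; i <= end; i++) {
      _map[i] = value;
    }
  }
};

With a map along these lines, insert() reduces to one put(addr, size, page) after the storestore barrier, and remove() to put(addr, size, NULL), as in the hunk above.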

View File

@@ -25,7 +25,6 @@
#define SHARE_GC_Z_ZPAGETABLE_HPP
#include "gc/z/zGranuleMap.hpp"
#include "gc/z/zPageTableEntry.hpp"
#include "memory/allocation.hpp"
class ZPage;
@@ -35,30 +34,24 @@ class ZPageTable {
friend class ZPageTableIterator;
private:
ZGranuleMap<ZPageTableEntry> _map;
ZPageTableEntry get_entry(ZPage* page) const;
void put_entry(ZPage* page, ZPageTableEntry entry);
ZGranuleMap<ZPage*> _map;
public:
ZPageTable();
ZPage* get(uintptr_t addr) const;
void insert(ZPage* page);
void remove(ZPage* page);
bool is_relocating(uintptr_t addr) const;
void set_relocating(ZPage* page);
void clear_relocating(ZPage* page);
};
class ZPageTableIterator : public StackObj {
private:
ZGranuleMapIterator<ZPageTableEntry> _iter;
ZPage* _prev;
ZGranuleMapIterator<ZPage*> _iter;
ZPage* _prev;
public:
ZPageTableIterator(const ZPageTable* pagetable);
ZPageTableIterator(const ZPageTable* page_table);
bool next(ZPage** page);
};

View File

@@ -29,25 +29,18 @@
#include "gc/z/zPageTable.hpp"
inline ZPage* ZPageTable::get(uintptr_t addr) const {
return _map.get(addr).page();
return _map.get(addr);
}
inline bool ZPageTable::is_relocating(uintptr_t addr) const {
return _map.get(addr).relocating();
}
inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* pagetable) :
_iter(&pagetable->_map),
inline ZPageTableIterator::ZPageTableIterator(const ZPageTable* page_table) :
_iter(&page_table->_map),
_prev(NULL) {}
inline bool ZPageTableIterator::next(ZPage** page) {
ZPageTableEntry entry;
while (_iter.next(&entry)) {
ZPage* const next = entry.page();
if (next != NULL && next != _prev) {
for (ZPage* entry; _iter.next(&entry);) {
if (entry != NULL && entry != _prev) {
// Next page found
*page = _prev = next;
*page = _prev = entry;
return true;
}
}
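The slimmed-down iterator above walks the raw slot array directly: NULL slots are skipped, and consecutive slots repeating the previously returned page are collapsed, since a multi-granule page occupies adjacent slots and can therefore only repeat immediately. A tiny standalone sketch of that dedup walk, with hypothetical names:

#include <cstddef>
#include <cstdio>

struct Page { int id; };

// Report each distinct page in an array of per-granule slots exactly once.
// Remembering only the previously returned page suffices, because a page's
// granules are contiguous and so its slots are adjacent.
static size_t count_pages(Page* const* slots, size_t nslots) {
  size_t count = 0;
  const Page* prev = NULL;
  for (size_t i = 0; i < nslots; i++) {
    const Page* entry = slots[i];
    if (entry != NULL && entry != prev) {
      count++;
      prev = entry;
    }
  }
  return count;
}

int main() {
  Page small_page = {1};
  Page large_page = {2};
  // One single-granule page, a gap, then one page spanning three granules.
  Page* slots[] = { &small_page, NULL, &large_page, &large_page, &large_page, NULL };
  printf("pages = %zu\n", count_pages(slots, 6));  // prints "pages = 2"
  return 0;
}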

View File

@@ -1,73 +0,0 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZPAGETABLEENTRY_HPP
#define SHARE_GC_Z_ZPAGETABLEENTRY_HPP
#include "gc/z/zBitField.hpp"
#include "memory/allocation.hpp"
//
// Page table entry layout
// -----------------------
//
// 6
// 3 1 0
// +----------------------------------------------------------------------+-+
// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 1111111|1|
// +----------------------------------------------------------------------+-+
// | |
// | 0-0 Relocating Flag (1-bit) *
// |
// |
// |
// * 63-1 Page address (63-bits)
//
class ZPage;
class ZPageTableEntry {
private:
typedef ZBitField<uint64_t, bool, 0, 1> field_relocating;
typedef ZBitField<uint64_t, ZPage*, 1, 63, 1> field_page;
uint64_t _entry;
public:
ZPageTableEntry() :
_entry(0) {}
ZPageTableEntry(ZPage* page, bool relocating) :
_entry(field_page::encode(page) |
field_relocating::encode(relocating)) {}
bool relocating() const {
return field_relocating::decode(_entry);
}
ZPage* page() const {
return field_page::decode(_entry);
}
};
#endif // SHARE_GC_Z_ZPAGETABLEENTRY_HPP

View File

@@ -48,7 +48,7 @@ public class ZHeap extends VMObject {
Type type = db.lookupType("ZHeap");
pageAllocatorFieldOffset = type.getAddressField("_page_allocator").getOffset();
pageTableFieldOffset = type.getAddressField("_pagetable").getOffset();
pageTableFieldOffset = type.getAddressField("_page_table").getOffset();
}
public ZHeap(Address addr) {